diff --git a/.gitignore b/.gitignore index 5b70dc3662ea983054c3378d0679c1d49b7348ba..5d1dfd91c7806a1a9816518dd5e053afbe0565ee 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ test_save.py +*.pyc \ No newline at end of file diff --git a/Image/AlexNet/code/train.log b/Image/AlexNet/code/train.log new file mode 100644 index 0000000000000000000000000000000000000000..3c624fa1e5206b094e3ac1a3b588fb1cb436630e --- /dev/null +++ b/Image/AlexNet/code/train.log @@ -0,0 +1,503 @@ +2025-03-09 19:50:57,307 - train - INFO - 开始训练 alexnet +2025-03-09 19:50:57,308 - train - INFO - 总轮数: 100, 学习率: 0.1, 设备: cuda:2 +2025-03-09 19:50:57,941 - train - INFO - Epoch: 1 | Batch: 0 | Loss: 2.303 | Acc: 9.38% +2025-03-09 19:50:59,844 - train - INFO - Epoch: 1 | Batch: 100 | Loss: 2.300 | Acc: 10.04% +2025-03-09 19:51:01,901 - train - INFO - Epoch: 1 | Batch: 200 | Loss: 2.227 | Acc: 13.40% +2025-03-09 19:51:03,899 - train - INFO - Epoch: 1 | Batch: 300 | Loss: 2.162 | Acc: 15.09% +2025-03-09 19:51:07,107 - train - INFO - Epoch: 1 | Test Loss: 1.896 | Test Acc: 19.86% +2025-03-09 19:51:07,264 - train - INFO - Epoch: 2 | Batch: 0 | Loss: 1.888 | Acc: 16.41% +2025-03-09 19:51:09,345 - train - INFO - Epoch: 2 | Batch: 100 | Loss: 1.929 | Acc: 21.02% +2025-03-09 19:51:11,452 - train - INFO - Epoch: 2 | Batch: 200 | Loss: 1.917 | Acc: 21.42% +2025-03-09 19:51:13,434 - train - INFO - Epoch: 2 | Batch: 300 | Loss: 1.911 | Acc: 21.87% +2025-03-09 19:51:16,541 - train - INFO - Epoch: 2 | Test Loss: 1.840 | Test Acc: 25.23% +2025-03-09 19:51:16,704 - train - INFO - Epoch: 3 | Batch: 0 | Loss: 1.938 | Acc: 17.97% +2025-03-09 19:51:18,746 - train - INFO - Epoch: 3 | Batch: 100 | Loss: 1.837 | Acc: 26.49% +2025-03-09 19:51:20,674 - train - INFO - Epoch: 3 | Batch: 200 | Loss: 1.812 | Acc: 27.79% +2025-03-09 19:51:22,577 - train - INFO - Epoch: 3 | Batch: 300 | Loss: 1.798 | Acc: 28.75% +2025-03-09 19:51:25,741 - train - INFO - Epoch: 3 | Test Loss: 1.613 | Test Acc: 38.13% +2025-03-09 19:51:25,920 - train - INFO - Epoch: 4 | Batch: 0 | Loss: 1.613 | Acc: 32.81% +2025-03-09 19:51:28,016 - train - INFO - Epoch: 4 | Batch: 100 | Loss: 1.683 | Acc: 34.67% +2025-03-09 19:51:30,038 - train - INFO - Epoch: 4 | Batch: 200 | Loss: 1.668 | Acc: 35.46% +2025-03-09 19:51:32,131 - train - INFO - Epoch: 4 | Batch: 300 | Loss: 1.675 | Acc: 36.00% +2025-03-09 19:51:35,398 - train - INFO - Epoch: 4 | Test Loss: 1.624 | Test Acc: 39.42% +2025-03-09 19:51:43,699 - train - INFO - Epoch: 5 | Batch: 0 | Loss: 1.684 | Acc: 39.06% +2025-03-09 19:51:45,670 - train - INFO - Epoch: 5 | Batch: 100 | Loss: 1.638 | Acc: 39.09% +2025-03-09 19:51:47,561 - train - INFO - Epoch: 5 | Batch: 200 | Loss: 1.623 | Acc: 39.87% +2025-03-09 19:51:49,399 - train - INFO - Epoch: 5 | Batch: 300 | Loss: 1.614 | Acc: 40.60% +2025-03-09 19:51:52,227 - train - INFO - Epoch: 5 | Test Loss: 1.487 | Test Acc: 45.98% +2025-03-09 19:51:52,392 - train - INFO - Epoch: 6 | Batch: 0 | Loss: 1.565 | Acc: 45.31% +2025-03-09 19:51:54,259 - train - INFO - Epoch: 6 | Batch: 100 | Loss: 1.550 | Acc: 44.09% +2025-03-09 19:51:56,191 - train - INFO - Epoch: 6 | Batch: 200 | Loss: 1.551 | Acc: 44.07% +2025-03-09 19:51:58,102 - train - INFO - Epoch: 6 | Batch: 300 | Loss: 1.552 | Acc: 44.16% +2025-03-09 19:52:00,982 - train - INFO - Epoch: 6 | Test Loss: 1.498 | Test Acc: 46.14% +2025-03-09 19:52:01,147 - train - INFO - Epoch: 7 | Batch: 0 | Loss: 1.703 | Acc: 35.94% +2025-03-09 19:52:03,112 - train - INFO - Epoch: 7 | Batch: 100 | Loss: 1.549 | Acc: 44.65% +2025-03-09 19:52:05,091 - train - 
INFO - Epoch: 7 | Batch: 200 | Loss: 1.558 | Acc: 44.57% +2025-03-09 19:52:07,124 - train - INFO - Epoch: 7 | Batch: 300 | Loss: 1.546 | Acc: 44.95% +2025-03-09 19:52:10,122 - train - INFO - Epoch: 7 | Test Loss: 1.463 | Test Acc: 48.95% +2025-03-09 19:52:10,282 - train - INFO - Epoch: 8 | Batch: 0 | Loss: 1.559 | Acc: 46.88% +2025-03-09 19:52:12,269 - train - INFO - Epoch: 8 | Batch: 100 | Loss: 1.522 | Acc: 45.61% +2025-03-09 19:52:14,191 - train - INFO - Epoch: 8 | Batch: 200 | Loss: 1.515 | Acc: 46.19% +2025-03-09 19:52:16,026 - train - INFO - Epoch: 8 | Batch: 300 | Loss: 1.512 | Acc: 46.34% +2025-03-09 19:52:18,972 - train - INFO - Epoch: 8 | Test Loss: 1.414 | Test Acc: 51.07% +2025-03-09 19:52:27,269 - train - INFO - Epoch: 9 | Batch: 0 | Loss: 1.405 | Acc: 50.00% +2025-03-09 19:52:29,140 - train - INFO - Epoch: 9 | Batch: 100 | Loss: 1.506 | Acc: 46.11% +2025-03-09 19:52:31,111 - train - INFO - Epoch: 9 | Batch: 200 | Loss: 1.495 | Acc: 46.85% +2025-03-09 19:52:33,061 - train - INFO - Epoch: 9 | Batch: 300 | Loss: 1.500 | Acc: 46.91% +2025-03-09 19:52:36,118 - train - INFO - Epoch: 9 | Test Loss: 1.540 | Test Acc: 46.20% +2025-03-09 19:52:36,307 - train - INFO - Epoch: 10 | Batch: 0 | Loss: 1.710 | Acc: 42.97% +2025-03-09 19:52:38,571 - train - INFO - Epoch: 10 | Batch: 100 | Loss: 1.515 | Acc: 47.08% +2025-03-09 19:52:40,615 - train - INFO - Epoch: 10 | Batch: 200 | Loss: 1.506 | Acc: 47.12% +2025-03-09 19:52:42,631 - train - INFO - Epoch: 10 | Batch: 300 | Loss: 1.498 | Acc: 47.72% +2025-03-09 19:52:45,697 - train - INFO - Epoch: 10 | Test Loss: 1.359 | Test Acc: 51.08% +2025-03-09 19:52:45,908 - train - INFO - Epoch: 11 | Batch: 0 | Loss: 1.288 | Acc: 48.44% +2025-03-09 19:52:48,156 - train - INFO - Epoch: 11 | Batch: 100 | Loss: 1.493 | Acc: 47.49% +2025-03-09 19:52:50,122 - train - INFO - Epoch: 11 | Batch: 200 | Loss: 1.483 | Acc: 47.77% +2025-03-09 19:52:52,086 - train - INFO - Epoch: 11 | Batch: 300 | Loss: 1.482 | Acc: 47.71% +2025-03-09 19:52:55,093 - train - INFO - Epoch: 11 | Test Loss: 1.425 | Test Acc: 49.77% +2025-03-09 19:52:55,255 - train - INFO - Epoch: 12 | Batch: 0 | Loss: 1.601 | Acc: 38.28% +2025-03-09 19:52:57,285 - train - INFO - Epoch: 12 | Batch: 100 | Loss: 1.495 | Acc: 47.03% +2025-03-09 19:52:59,361 - train - INFO - Epoch: 12 | Batch: 200 | Loss: 1.484 | Acc: 47.75% +2025-03-09 19:53:01,286 - train - INFO - Epoch: 12 | Batch: 300 | Loss: 1.474 | Acc: 48.21% +2025-03-09 19:53:04,233 - train - INFO - Epoch: 12 | Test Loss: 1.395 | Test Acc: 50.04% +2025-03-09 19:53:13,654 - train - INFO - Epoch: 13 | Batch: 0 | Loss: 1.349 | Acc: 54.69% +2025-03-09 19:53:16,010 - train - INFO - Epoch: 13 | Batch: 100 | Loss: 1.476 | Acc: 47.81% +2025-03-09 19:53:18,014 - train - INFO - Epoch: 13 | Batch: 200 | Loss: 1.459 | Acc: 48.68% +2025-03-09 19:53:19,910 - train - INFO - Epoch: 13 | Batch: 300 | Loss: 1.463 | Acc: 48.69% +2025-03-09 19:53:22,970 - train - INFO - Epoch: 13 | Test Loss: 1.433 | Test Acc: 50.43% +2025-03-09 19:53:23,123 - train - INFO - Epoch: 14 | Batch: 0 | Loss: 1.438 | Acc: 48.44% +2025-03-09 19:53:25,027 - train - INFO - Epoch: 14 | Batch: 100 | Loss: 1.490 | Acc: 47.90% +2025-03-09 19:53:27,032 - train - INFO - Epoch: 14 | Batch: 200 | Loss: 1.478 | Acc: 48.26% +2025-03-09 19:53:28,990 - train - INFO - Epoch: 14 | Batch: 300 | Loss: 1.473 | Acc: 48.42% +2025-03-09 19:53:31,930 - train - INFO - Epoch: 14 | Test Loss: 1.419 | Test Acc: 50.80% +2025-03-09 19:53:32,132 - train - INFO - Epoch: 15 | Batch: 0 | Loss: 1.496 | Acc: 49.22% +2025-03-09 
19:53:34,072 - train - INFO - Epoch: 15 | Batch: 100 | Loss: 1.468 | Acc: 49.19% +2025-03-09 19:53:36,080 - train - INFO - Epoch: 15 | Batch: 200 | Loss: 1.473 | Acc: 49.07% +2025-03-09 19:53:38,079 - train - INFO - Epoch: 15 | Batch: 300 | Loss: 1.468 | Acc: 49.20% +2025-03-09 19:53:41,161 - train - INFO - Epoch: 15 | Test Loss: 1.424 | Test Acc: 52.09% +2025-03-09 19:53:41,356 - train - INFO - Epoch: 16 | Batch: 0 | Loss: 1.368 | Acc: 46.88% +2025-03-09 19:53:43,498 - train - INFO - Epoch: 16 | Batch: 100 | Loss: 1.485 | Acc: 48.47% +2025-03-09 19:53:45,625 - train - INFO - Epoch: 16 | Batch: 200 | Loss: 1.475 | Acc: 48.40% +2025-03-09 19:53:47,626 - train - INFO - Epoch: 16 | Batch: 300 | Loss: 1.468 | Acc: 48.62% +2025-03-09 19:53:50,536 - train - INFO - Epoch: 16 | Test Loss: 1.368 | Test Acc: 54.06% +2025-03-09 19:53:58,981 - train - INFO - Epoch: 17 | Batch: 0 | Loss: 1.322 | Acc: 48.44% +2025-03-09 19:54:01,050 - train - INFO - Epoch: 17 | Batch: 100 | Loss: 1.453 | Acc: 50.21% +2025-03-09 19:54:03,003 - train - INFO - Epoch: 17 | Batch: 200 | Loss: 1.445 | Acc: 50.24% +2025-03-09 19:54:05,016 - train - INFO - Epoch: 17 | Batch: 300 | Loss: 1.442 | Acc: 50.14% +2025-03-09 19:54:08,407 - train - INFO - Epoch: 17 | Test Loss: 1.427 | Test Acc: 50.52% +2025-03-09 19:54:08,577 - train - INFO - Epoch: 18 | Batch: 0 | Loss: 1.677 | Acc: 39.84% +2025-03-09 19:54:11,118 - train - INFO - Epoch: 18 | Batch: 100 | Loss: 1.466 | Acc: 49.07% +2025-03-09 19:54:13,136 - train - INFO - Epoch: 18 | Batch: 200 | Loss: 1.459 | Acc: 49.04% +2025-03-09 19:54:15,032 - train - INFO - Epoch: 18 | Batch: 300 | Loss: 1.450 | Acc: 49.59% +2025-03-09 19:54:18,113 - train - INFO - Epoch: 18 | Test Loss: 1.461 | Test Acc: 51.56% +2025-03-09 19:54:18,278 - train - INFO - Epoch: 19 | Batch: 0 | Loss: 1.473 | Acc: 53.91% +2025-03-09 19:54:20,263 - train - INFO - Epoch: 19 | Batch: 100 | Loss: 1.457 | Acc: 49.16% +2025-03-09 19:54:22,414 - train - INFO - Epoch: 19 | Batch: 200 | Loss: 1.433 | Acc: 50.05% +2025-03-09 19:54:24,518 - train - INFO - Epoch: 19 | Batch: 300 | Loss: 1.427 | Acc: 50.53% +2025-03-09 19:54:27,682 - train - INFO - Epoch: 19 | Test Loss: 1.490 | Test Acc: 53.65% +2025-03-09 19:54:27,849 - train - INFO - Epoch: 20 | Batch: 0 | Loss: 1.817 | Acc: 46.09% +2025-03-09 19:54:29,974 - train - INFO - Epoch: 20 | Batch: 100 | Loss: 1.438 | Acc: 50.66% +2025-03-09 19:54:32,009 - train - INFO - Epoch: 20 | Batch: 200 | Loss: 1.428 | Acc: 50.77% +2025-03-09 19:54:34,136 - train - INFO - Epoch: 20 | Batch: 300 | Loss: 1.427 | Acc: 50.63% +2025-03-09 19:54:37,134 - train - INFO - Epoch: 20 | Test Loss: 1.343 | Test Acc: 54.50% +2025-03-09 19:54:45,895 - train - INFO - Epoch: 21 | Batch: 0 | Loss: 1.165 | Acc: 61.72% +2025-03-09 19:54:48,130 - train - INFO - Epoch: 21 | Batch: 100 | Loss: 1.424 | Acc: 49.71% +2025-03-09 19:54:50,095 - train - INFO - Epoch: 21 | Batch: 200 | Loss: 1.431 | Acc: 50.06% +2025-03-09 19:54:52,237 - train - INFO - Epoch: 21 | Batch: 300 | Loss: 1.431 | Acc: 50.08% +2025-03-09 19:54:55,342 - train - INFO - Epoch: 21 | Test Loss: 1.527 | Test Acc: 53.13% +2025-03-09 19:54:55,542 - train - INFO - Epoch: 22 | Batch: 0 | Loss: 1.576 | Acc: 50.78% +2025-03-09 19:54:57,589 - train - INFO - Epoch: 22 | Batch: 100 | Loss: 1.439 | Acc: 50.59% +2025-03-09 19:54:59,618 - train - INFO - Epoch: 22 | Batch: 200 | Loss: 1.443 | Acc: 50.26% +2025-03-09 19:55:01,786 - train - INFO - Epoch: 22 | Batch: 300 | Loss: 1.447 | Acc: 50.06% +2025-03-09 19:55:04,883 - train - INFO - Epoch: 22 | Test Loss: 
1.385 | Test Acc: 52.66% +2025-03-09 19:55:05,110 - train - INFO - Epoch: 23 | Batch: 0 | Loss: 1.361 | Acc: 49.22% +2025-03-09 19:55:07,153 - train - INFO - Epoch: 23 | Batch: 100 | Loss: 1.446 | Acc: 48.96% +2025-03-09 19:55:09,130 - train - INFO - Epoch: 23 | Batch: 200 | Loss: 1.446 | Acc: 49.48% +2025-03-09 19:55:11,103 - train - INFO - Epoch: 23 | Batch: 300 | Loss: 1.436 | Acc: 50.26% +2025-03-09 19:55:14,018 - train - INFO - Epoch: 23 | Test Loss: 1.312 | Test Acc: 54.84% +2025-03-09 19:55:14,202 - train - INFO - Epoch: 24 | Batch: 0 | Loss: 1.346 | Acc: 50.78% +2025-03-09 19:55:16,194 - train - INFO - Epoch: 24 | Batch: 100 | Loss: 1.455 | Acc: 48.91% +2025-03-09 19:55:18,163 - train - INFO - Epoch: 24 | Batch: 200 | Loss: 1.440 | Acc: 49.81% +2025-03-09 19:55:20,197 - train - INFO - Epoch: 24 | Batch: 300 | Loss: 1.437 | Acc: 50.04% +2025-03-09 19:55:23,241 - train - INFO - Epoch: 24 | Test Loss: 1.458 | Test Acc: 51.68% +2025-03-09 19:55:32,077 - train - INFO - Epoch: 25 | Batch: 0 | Loss: 1.432 | Acc: 54.69% +2025-03-09 19:55:34,182 - train - INFO - Epoch: 25 | Batch: 100 | Loss: 1.410 | Acc: 51.34% +2025-03-09 19:55:36,201 - train - INFO - Epoch: 25 | Batch: 200 | Loss: 1.422 | Acc: 51.18% +2025-03-09 19:55:38,082 - train - INFO - Epoch: 25 | Batch: 300 | Loss: 1.414 | Acc: 51.21% +2025-03-09 19:55:41,003 - train - INFO - Epoch: 25 | Test Loss: 1.407 | Test Acc: 53.10% +2025-03-09 19:55:41,169 - train - INFO - Epoch: 26 | Batch: 0 | Loss: 1.408 | Acc: 53.91% +2025-03-09 19:55:43,071 - train - INFO - Epoch: 26 | Batch: 100 | Loss: 1.397 | Acc: 51.52% +2025-03-09 19:55:45,032 - train - INFO - Epoch: 26 | Batch: 200 | Loss: 1.416 | Acc: 51.12% +2025-03-09 19:55:47,033 - train - INFO - Epoch: 26 | Batch: 300 | Loss: 1.431 | Acc: 50.55% +2025-03-09 19:55:50,303 - train - INFO - Epoch: 26 | Test Loss: 1.337 | Test Acc: 53.87% +2025-03-09 19:55:50,467 - train - INFO - Epoch: 27 | Batch: 0 | Loss: 1.436 | Acc: 52.34% +2025-03-09 19:55:52,603 - train - INFO - Epoch: 27 | Batch: 100 | Loss: 1.400 | Acc: 51.96% +2025-03-09 19:55:54,700 - train - INFO - Epoch: 27 | Batch: 200 | Loss: 1.415 | Acc: 51.58% +2025-03-09 19:55:56,931 - train - INFO - Epoch: 27 | Batch: 300 | Loss: 1.403 | Acc: 51.88% +2025-03-09 19:56:00,315 - train - INFO - Epoch: 27 | Test Loss: 1.284 | Test Acc: 57.01% +2025-03-09 19:56:00,490 - train - INFO - Epoch: 28 | Batch: 0 | Loss: 1.196 | Acc: 53.91% +2025-03-09 19:56:02,614 - train - INFO - Epoch: 28 | Batch: 100 | Loss: 1.402 | Acc: 51.76% +2025-03-09 19:56:04,933 - train - INFO - Epoch: 28 | Batch: 200 | Loss: 1.408 | Acc: 51.64% +2025-03-09 19:56:06,826 - train - INFO - Epoch: 28 | Batch: 300 | Loss: 1.415 | Acc: 51.37% +2025-03-09 19:56:09,723 - train - INFO - Epoch: 28 | Test Loss: 1.512 | Test Acc: 49.97% +2025-03-09 19:56:18,095 - train - INFO - Epoch: 29 | Batch: 0 | Loss: 1.663 | Acc: 49.22% +2025-03-09 19:56:20,071 - train - INFO - Epoch: 29 | Batch: 100 | Loss: 1.450 | Acc: 49.74% +2025-03-09 19:56:22,046 - train - INFO - Epoch: 29 | Batch: 200 | Loss: 1.418 | Acc: 51.07% +2025-03-09 19:56:23,941 - train - INFO - Epoch: 29 | Batch: 300 | Loss: 1.418 | Acc: 51.21% +2025-03-09 19:56:27,201 - train - INFO - Epoch: 29 | Test Loss: 1.399 | Test Acc: 50.57% +2025-03-09 19:56:27,418 - train - INFO - Epoch: 30 | Batch: 0 | Loss: 1.487 | Acc: 50.78% +2025-03-09 19:56:29,631 - train - INFO - Epoch: 30 | Batch: 100 | Loss: 1.407 | Acc: 52.09% +2025-03-09 19:56:31,749 - train - INFO - Epoch: 30 | Batch: 200 | Loss: 1.405 | Acc: 52.03% +2025-03-09 19:56:34,079 - train 
- INFO - Epoch: 30 | Batch: 300 | Loss: 1.409 | Acc: 51.79% +2025-03-09 19:56:37,224 - train - INFO - Epoch: 30 | Test Loss: 1.353 | Test Acc: 51.54% +2025-03-09 19:56:37,407 - train - INFO - Epoch: 31 | Batch: 0 | Loss: 1.319 | Acc: 50.78% +2025-03-09 19:56:39,290 - train - INFO - Epoch: 31 | Batch: 100 | Loss: 1.408 | Acc: 52.58% +2025-03-09 19:56:41,278 - train - INFO - Epoch: 31 | Batch: 200 | Loss: 1.403 | Acc: 52.27% +2025-03-09 19:56:43,239 - train - INFO - Epoch: 31 | Batch: 300 | Loss: 1.405 | Acc: 52.06% +2025-03-09 19:56:46,411 - train - INFO - Epoch: 31 | Test Loss: 1.437 | Test Acc: 50.89% +2025-03-09 19:56:46,625 - train - INFO - Epoch: 32 | Batch: 0 | Loss: 1.625 | Acc: 38.28% +2025-03-09 19:56:48,617 - train - INFO - Epoch: 32 | Batch: 100 | Loss: 1.408 | Acc: 52.27% +2025-03-09 19:56:50,612 - train - INFO - Epoch: 32 | Batch: 200 | Loss: 1.404 | Acc: 52.34% +2025-03-09 19:56:52,563 - train - INFO - Epoch: 32 | Batch: 300 | Loss: 1.419 | Acc: 51.68% +2025-03-09 19:56:55,759 - train - INFO - Epoch: 32 | Test Loss: 1.462 | Test Acc: 49.63% +2025-03-09 19:57:04,788 - train - INFO - Epoch: 33 | Batch: 0 | Loss: 1.380 | Acc: 50.78% +2025-03-09 19:57:06,775 - train - INFO - Epoch: 33 | Batch: 100 | Loss: 1.404 | Acc: 52.44% +2025-03-09 19:57:08,717 - train - INFO - Epoch: 33 | Batch: 200 | Loss: 1.401 | Acc: 52.40% +2025-03-09 19:57:10,621 - train - INFO - Epoch: 33 | Batch: 300 | Loss: 1.402 | Acc: 52.39% +2025-03-09 19:57:13,611 - train - INFO - Epoch: 33 | Test Loss: 1.429 | Test Acc: 50.51% +2025-03-09 19:57:13,780 - train - INFO - Epoch: 34 | Batch: 0 | Loss: 1.457 | Acc: 45.31% +2025-03-09 19:57:15,716 - train - INFO - Epoch: 34 | Batch: 100 | Loss: 1.382 | Acc: 52.42% +2025-03-09 19:57:17,568 - train - INFO - Epoch: 34 | Batch: 200 | Loss: 1.369 | Acc: 53.09% +2025-03-09 19:57:19,467 - train - INFO - Epoch: 34 | Batch: 300 | Loss: 1.385 | Acc: 52.48% +2025-03-09 19:57:22,494 - train - INFO - Epoch: 34 | Test Loss: 1.269 | Test Acc: 58.16% +2025-03-09 19:57:22,652 - train - INFO - Epoch: 35 | Batch: 0 | Loss: 1.363 | Acc: 50.00% +2025-03-09 19:57:24,637 - train - INFO - Epoch: 35 | Batch: 100 | Loss: 1.370 | Acc: 53.33% +2025-03-09 19:57:26,607 - train - INFO - Epoch: 35 | Batch: 200 | Loss: 1.357 | Acc: 53.85% +2025-03-09 19:57:28,609 - train - INFO - Epoch: 35 | Batch: 300 | Loss: 1.376 | Acc: 53.20% +2025-03-09 19:57:31,872 - train - INFO - Epoch: 35 | Test Loss: 1.374 | Test Acc: 52.82% +2025-03-09 19:57:32,038 - train - INFO - Epoch: 36 | Batch: 0 | Loss: 1.409 | Acc: 48.44% +2025-03-09 19:57:34,309 - train - INFO - Epoch: 36 | Batch: 100 | Loss: 1.380 | Acc: 53.10% +2025-03-09 19:57:36,495 - train - INFO - Epoch: 36 | Batch: 200 | Loss: 1.412 | Acc: 52.32% +2025-03-09 19:57:38,858 - train - INFO - Epoch: 36 | Batch: 300 | Loss: 1.407 | Acc: 52.43% +2025-03-09 19:57:41,965 - train - INFO - Epoch: 36 | Test Loss: 1.315 | Test Acc: 53.89% +2025-03-09 19:57:50,440 - train - INFO - Epoch: 37 | Batch: 0 | Loss: 1.449 | Acc: 54.69% +2025-03-09 19:57:52,335 - train - INFO - Epoch: 37 | Batch: 100 | Loss: 1.406 | Acc: 52.27% +2025-03-09 19:57:54,375 - train - INFO - Epoch: 37 | Batch: 200 | Loss: 1.402 | Acc: 52.22% +2025-03-09 19:57:56,397 - train - INFO - Epoch: 37 | Batch: 300 | Loss: 1.402 | Acc: 51.95% +2025-03-09 19:57:59,642 - train - INFO - Epoch: 37 | Test Loss: 1.317 | Test Acc: 54.21% +2025-03-09 19:57:59,815 - train - INFO - Epoch: 38 | Batch: 0 | Loss: 1.396 | Acc: 53.91% +2025-03-09 19:58:01,888 - train - INFO - Epoch: 38 | Batch: 100 | Loss: 1.402 | Acc: 52.91% 
+2025-03-09 19:58:04,248 - train - INFO - Epoch: 38 | Batch: 200 | Loss: 1.390 | Acc: 53.00% +2025-03-09 19:58:06,220 - train - INFO - Epoch: 38 | Batch: 300 | Loss: 1.384 | Acc: 52.99% +2025-03-09 19:58:09,163 - train - INFO - Epoch: 38 | Test Loss: 1.377 | Test Acc: 53.56% +2025-03-09 19:58:09,344 - train - INFO - Epoch: 39 | Batch: 0 | Loss: 1.318 | Acc: 56.25% +2025-03-09 19:58:11,213 - train - INFO - Epoch: 39 | Batch: 100 | Loss: 1.357 | Acc: 53.60% +2025-03-09 19:58:13,162 - train - INFO - Epoch: 39 | Batch: 200 | Loss: 1.359 | Acc: 53.66% +2025-03-09 19:58:15,049 - train - INFO - Epoch: 39 | Batch: 300 | Loss: 1.365 | Acc: 53.45% +2025-03-09 19:58:17,934 - train - INFO - Epoch: 39 | Test Loss: 1.387 | Test Acc: 53.77% +2025-03-09 19:58:18,107 - train - INFO - Epoch: 40 | Batch: 0 | Loss: 1.412 | Acc: 48.44% +2025-03-09 19:58:20,130 - train - INFO - Epoch: 40 | Batch: 100 | Loss: 1.366 | Acc: 53.23% +2025-03-09 19:58:22,068 - train - INFO - Epoch: 40 | Batch: 200 | Loss: 1.351 | Acc: 53.61% +2025-03-09 19:58:24,230 - train - INFO - Epoch: 40 | Batch: 300 | Loss: 1.354 | Acc: 53.64% +2025-03-09 19:58:27,405 - train - INFO - Epoch: 40 | Test Loss: 1.372 | Test Acc: 53.59% +2025-03-09 19:58:36,177 - train - INFO - Epoch: 41 | Batch: 0 | Loss: 1.538 | Acc: 44.53% +2025-03-09 19:58:38,243 - train - INFO - Epoch: 41 | Batch: 100 | Loss: 1.372 | Acc: 53.30% +2025-03-09 19:58:40,234 - train - INFO - Epoch: 41 | Batch: 200 | Loss: 1.364 | Acc: 53.72% +2025-03-09 19:58:42,245 - train - INFO - Epoch: 41 | Batch: 300 | Loss: 1.369 | Acc: 53.57% +2025-03-09 19:58:45,634 - train - INFO - Epoch: 41 | Test Loss: 1.296 | Test Acc: 54.74% +2025-03-09 19:58:45,798 - train - INFO - Epoch: 42 | Batch: 0 | Loss: 1.475 | Acc: 44.53% +2025-03-09 19:58:47,867 - train - INFO - Epoch: 42 | Batch: 100 | Loss: 1.368 | Acc: 52.49% +2025-03-09 19:58:50,126 - train - INFO - Epoch: 42 | Batch: 200 | Loss: 1.368 | Acc: 52.68% +2025-03-09 19:58:52,366 - train - INFO - Epoch: 42 | Batch: 300 | Loss: 1.377 | Acc: 52.70% +2025-03-09 19:58:55,621 - train - INFO - Epoch: 42 | Test Loss: 1.309 | Test Acc: 58.19% +2025-03-09 19:58:55,774 - train - INFO - Epoch: 43 | Batch: 0 | Loss: 1.291 | Acc: 61.72% +2025-03-09 19:58:57,733 - train - INFO - Epoch: 43 | Batch: 100 | Loss: 1.340 | Acc: 54.46% +2025-03-09 19:58:59,676 - train - INFO - Epoch: 43 | Batch: 200 | Loss: 1.329 | Acc: 54.83% +2025-03-09 19:59:01,636 - train - INFO - Epoch: 43 | Batch: 300 | Loss: 1.331 | Acc: 55.12% +2025-03-09 19:59:04,691 - train - INFO - Epoch: 43 | Test Loss: 1.373 | Test Acc: 53.69% +2025-03-09 19:59:04,836 - train - INFO - Epoch: 44 | Batch: 0 | Loss: 1.310 | Acc: 52.34% +2025-03-09 19:59:06,796 - train - INFO - Epoch: 44 | Batch: 100 | Loss: 1.373 | Acc: 53.46% +2025-03-09 19:59:08,711 - train - INFO - Epoch: 44 | Batch: 200 | Loss: 1.368 | Acc: 53.84% +2025-03-09 19:59:10,613 - train - INFO - Epoch: 44 | Batch: 300 | Loss: 1.362 | Acc: 53.90% +2025-03-09 19:59:13,399 - train - INFO - Epoch: 44 | Test Loss: 1.378 | Test Acc: 53.67% +2025-03-09 19:59:21,620 - train - INFO - Epoch: 45 | Batch: 0 | Loss: 1.383 | Acc: 58.59% +2025-03-09 19:59:23,458 - train - INFO - Epoch: 45 | Batch: 100 | Loss: 1.325 | Acc: 55.49% +2025-03-09 19:59:25,416 - train - INFO - Epoch: 45 | Batch: 200 | Loss: 1.345 | Acc: 54.83% +2025-03-09 19:59:27,390 - train - INFO - Epoch: 45 | Batch: 300 | Loss: 1.361 | Acc: 54.17% +2025-03-09 19:59:30,307 - train - INFO - Epoch: 45 | Test Loss: 1.345 | Test Acc: 54.53% +2025-03-09 19:59:30,478 - train - INFO - Epoch: 46 | 
Batch: 0 | Loss: 1.303 | Acc: 55.47% +2025-03-09 19:59:32,427 - train - INFO - Epoch: 46 | Batch: 100 | Loss: 1.342 | Acc: 54.19% +2025-03-09 19:59:34,336 - train - INFO - Epoch: 46 | Batch: 200 | Loss: 1.337 | Acc: 54.64% +2025-03-09 19:59:36,228 - train - INFO - Epoch: 46 | Batch: 300 | Loss: 1.351 | Acc: 54.22% +2025-03-09 19:59:39,262 - train - INFO - Epoch: 46 | Test Loss: 1.345 | Test Acc: 56.16% +2025-03-09 19:59:39,452 - train - INFO - Epoch: 47 | Batch: 0 | Loss: 1.489 | Acc: 51.56% +2025-03-09 19:59:41,279 - train - INFO - Epoch: 47 | Batch: 100 | Loss: 1.328 | Acc: 54.90% +2025-03-09 19:59:43,204 - train - INFO - Epoch: 47 | Batch: 200 | Loss: 1.327 | Acc: 55.05% +2025-03-09 19:59:45,120 - train - INFO - Epoch: 47 | Batch: 300 | Loss: 1.333 | Acc: 55.00% +2025-03-09 19:59:48,254 - train - INFO - Epoch: 47 | Test Loss: 1.314 | Test Acc: 56.75% +2025-03-09 19:59:48,434 - train - INFO - Epoch: 48 | Batch: 0 | Loss: 1.208 | Acc: 59.38% +2025-03-09 19:59:50,454 - train - INFO - Epoch: 48 | Batch: 100 | Loss: 1.317 | Acc: 55.10% +2025-03-09 19:59:52,374 - train - INFO - Epoch: 48 | Batch: 200 | Loss: 1.312 | Acc: 55.74% +2025-03-09 19:59:54,256 - train - INFO - Epoch: 48 | Batch: 300 | Loss: 1.309 | Acc: 55.89% +2025-03-09 19:59:57,327 - train - INFO - Epoch: 48 | Test Loss: 1.271 | Test Acc: 58.09% +2025-03-09 20:00:06,506 - train - INFO - Epoch: 49 | Batch: 0 | Loss: 1.332 | Acc: 57.81% +2025-03-09 20:00:08,781 - train - INFO - Epoch: 49 | Batch: 100 | Loss: 1.298 | Acc: 55.98% +2025-03-09 20:00:11,349 - train - INFO - Epoch: 49 | Batch: 200 | Loss: 1.302 | Acc: 56.07% +2025-03-09 20:00:13,500 - train - INFO - Epoch: 49 | Batch: 300 | Loss: 1.307 | Acc: 55.87% +2025-03-09 20:00:16,544 - train - INFO - Epoch: 49 | Test Loss: 1.286 | Test Acc: 54.88% +2025-03-09 20:00:16,712 - train - INFO - Epoch: 50 | Batch: 0 | Loss: 1.354 | Acc: 55.47% +2025-03-09 20:00:18,651 - train - INFO - Epoch: 50 | Batch: 100 | Loss: 1.353 | Acc: 53.57% +2025-03-09 20:00:20,584 - train - INFO - Epoch: 50 | Batch: 200 | Loss: 1.335 | Acc: 54.65% +2025-03-09 20:00:22,491 - train - INFO - Epoch: 50 | Batch: 300 | Loss: 1.333 | Acc: 54.62% +2025-03-09 20:00:25,512 - train - INFO - Epoch: 50 | Test Loss: 1.264 | Test Acc: 56.42% +2025-03-09 20:00:25,692 - train - INFO - Epoch: 51 | Batch: 0 | Loss: 1.259 | Acc: 57.81% +2025-03-09 20:00:27,737 - train - INFO - Epoch: 51 | Batch: 100 | Loss: 1.293 | Acc: 56.82% +2025-03-09 20:00:29,766 - train - INFO - Epoch: 51 | Batch: 200 | Loss: 1.300 | Acc: 56.34% +2025-03-09 20:00:31,756 - train - INFO - Epoch: 51 | Batch: 300 | Loss: 1.297 | Acc: 56.14% +2025-03-09 20:00:34,929 - train - INFO - Epoch: 51 | Test Loss: 1.223 | Test Acc: 57.27% +2025-03-09 20:00:35,118 - train - INFO - Epoch: 52 | Batch: 0 | Loss: 1.323 | Acc: 55.47% +2025-03-09 20:00:37,282 - train - INFO - Epoch: 52 | Batch: 100 | Loss: 1.299 | Acc: 55.91% +2025-03-09 20:00:39,484 - train - INFO - Epoch: 52 | Batch: 200 | Loss: 1.289 | Acc: 56.01% +2025-03-09 20:00:41,515 - train - INFO - Epoch: 52 | Batch: 300 | Loss: 1.293 | Acc: 55.98% +2025-03-09 20:00:44,482 - train - INFO - Epoch: 52 | Test Loss: 1.310 | Test Acc: 55.00% +2025-03-09 20:00:52,924 - train - INFO - Epoch: 53 | Batch: 0 | Loss: 1.438 | Acc: 50.00% +2025-03-09 20:00:55,121 - train - INFO - Epoch: 53 | Batch: 100 | Loss: 1.297 | Acc: 56.12% +2025-03-09 20:00:57,170 - train - INFO - Epoch: 53 | Batch: 200 | Loss: 1.306 | Acc: 55.61% +2025-03-09 20:00:59,086 - train - INFO - Epoch: 53 | Batch: 300 | Loss: 1.310 | Acc: 55.32% +2025-03-09 
20:01:01,976 - train - INFO - Epoch: 53 | Test Loss: 1.293 | Test Acc: 55.28% +2025-03-09 20:01:02,134 - train - INFO - Epoch: 54 | Batch: 0 | Loss: 1.311 | Acc: 53.91% +2025-03-09 20:01:04,118 - train - INFO - Epoch: 54 | Batch: 100 | Loss: 1.335 | Acc: 54.83% +2025-03-09 20:01:06,123 - train - INFO - Epoch: 54 | Batch: 200 | Loss: 1.312 | Acc: 55.50% +2025-03-09 20:01:08,143 - train - INFO - Epoch: 54 | Batch: 300 | Loss: 1.296 | Acc: 55.97% +2025-03-09 20:01:11,377 - train - INFO - Epoch: 54 | Test Loss: 1.293 | Test Acc: 57.74% +2025-03-09 20:01:11,543 - train - INFO - Epoch: 55 | Batch: 0 | Loss: 1.194 | Acc: 58.59% +2025-03-09 20:01:13,621 - train - INFO - Epoch: 55 | Batch: 100 | Loss: 1.266 | Acc: 56.81% +2025-03-09 20:01:15,599 - train - INFO - Epoch: 55 | Batch: 200 | Loss: 1.267 | Acc: 57.00% +2025-03-09 20:01:17,646 - train - INFO - Epoch: 55 | Batch: 300 | Loss: 1.297 | Acc: 56.35% +2025-03-09 20:01:20,601 - train - INFO - Epoch: 55 | Test Loss: 1.288 | Test Acc: 56.27% +2025-03-09 20:01:20,778 - train - INFO - Epoch: 56 | Batch: 0 | Loss: 1.295 | Acc: 54.69% +2025-03-09 20:01:22,676 - train - INFO - Epoch: 56 | Batch: 100 | Loss: 1.306 | Acc: 55.91% +2025-03-09 20:01:24,633 - train - INFO - Epoch: 56 | Batch: 200 | Loss: 1.289 | Acc: 56.60% +2025-03-09 20:01:26,625 - train - INFO - Epoch: 56 | Batch: 300 | Loss: 1.299 | Acc: 56.09% +2025-03-09 20:01:29,991 - train - INFO - Epoch: 56 | Test Loss: 1.264 | Test Acc: 56.79% +2025-03-09 20:01:38,741 - train - INFO - Epoch: 57 | Batch: 0 | Loss: 1.413 | Acc: 53.12% +2025-03-09 20:01:40,895 - train - INFO - Epoch: 57 | Batch: 100 | Loss: 1.295 | Acc: 56.21% +2025-03-09 20:01:43,111 - train - INFO - Epoch: 57 | Batch: 200 | Loss: 1.287 | Acc: 56.34% +2025-03-09 20:01:45,114 - train - INFO - Epoch: 57 | Batch: 300 | Loss: 1.276 | Acc: 56.62% +2025-03-09 20:01:48,333 - train - INFO - Epoch: 57 | Test Loss: 1.283 | Test Acc: 56.47% +2025-03-09 20:01:48,483 - train - INFO - Epoch: 58 | Batch: 0 | Loss: 1.203 | Acc: 60.94% +2025-03-09 20:01:50,350 - train - INFO - Epoch: 58 | Batch: 100 | Loss: 1.265 | Acc: 57.16% +2025-03-09 20:01:52,319 - train - INFO - Epoch: 58 | Batch: 200 | Loss: 1.278 | Acc: 56.86% +2025-03-09 20:01:54,253 - train - INFO - Epoch: 58 | Batch: 300 | Loss: 1.278 | Acc: 56.76% +2025-03-09 20:01:57,139 - train - INFO - Epoch: 58 | Test Loss: 1.250 | Test Acc: 55.98% +2025-03-09 20:01:57,294 - train - INFO - Epoch: 59 | Batch: 0 | Loss: 1.299 | Acc: 54.69% +2025-03-09 20:01:59,177 - train - INFO - Epoch: 59 | Batch: 100 | Loss: 1.275 | Acc: 56.53% +2025-03-09 20:02:01,061 - train - INFO - Epoch: 59 | Batch: 200 | Loss: 1.264 | Acc: 57.11% +2025-03-09 20:02:03,013 - train - INFO - Epoch: 59 | Batch: 300 | Loss: 1.270 | Acc: 57.01% +2025-03-09 20:02:05,873 - train - INFO - Epoch: 59 | Test Loss: 1.175 | Test Acc: 60.14% +2025-03-09 20:02:06,084 - train - INFO - Epoch: 60 | Batch: 0 | Loss: 1.176 | Acc: 60.16% +2025-03-09 20:02:08,156 - train - INFO - Epoch: 60 | Batch: 100 | Loss: 1.276 | Acc: 56.69% +2025-03-09 20:02:10,235 - train - INFO - Epoch: 60 | Batch: 200 | Loss: 1.261 | Acc: 57.39% +2025-03-09 20:02:12,262 - train - INFO - Epoch: 60 | Batch: 300 | Loss: 1.272 | Acc: 56.96% +2025-03-09 20:02:15,547 - train - INFO - Epoch: 60 | Test Loss: 1.258 | Test Acc: 57.63% +2025-03-09 20:02:24,176 - train - INFO - Epoch: 61 | Batch: 0 | Loss: 1.181 | Acc: 63.28% +2025-03-09 20:02:26,274 - train - INFO - Epoch: 61 | Batch: 100 | Loss: 1.283 | Acc: 56.98% +2025-03-09 20:02:28,240 - train - INFO - Epoch: 61 | Batch: 200 | Loss: 
1.267 | Acc: 57.38% +2025-03-09 20:02:30,146 - train - INFO - Epoch: 61 | Batch: 300 | Loss: 1.266 | Acc: 57.71% +2025-03-09 20:02:33,184 - train - INFO - Epoch: 61 | Test Loss: 1.185 | Test Acc: 57.88% +2025-03-09 20:02:33,361 - train - INFO - Epoch: 62 | Batch: 0 | Loss: 1.181 | Acc: 55.47% +2025-03-09 20:02:35,405 - train - INFO - Epoch: 62 | Batch: 100 | Loss: 1.237 | Acc: 58.36% +2025-03-09 20:02:37,431 - train - INFO - Epoch: 62 | Batch: 200 | Loss: 1.222 | Acc: 58.83% +2025-03-09 20:02:39,492 - train - INFO - Epoch: 62 | Batch: 300 | Loss: 1.225 | Acc: 58.54% +2025-03-09 20:02:42,962 - train - INFO - Epoch: 62 | Test Loss: 1.201 | Test Acc: 59.17% +2025-03-09 20:02:43,194 - train - INFO - Epoch: 63 | Batch: 0 | Loss: 1.167 | Acc: 57.03% +2025-03-09 20:02:45,381 - train - INFO - Epoch: 63 | Batch: 100 | Loss: 1.255 | Acc: 57.75% +2025-03-09 20:02:47,427 - train - INFO - Epoch: 63 | Batch: 200 | Loss: 1.251 | Acc: 57.57% +2025-03-09 20:02:49,782 - train - INFO - Epoch: 63 | Batch: 300 | Loss: 1.247 | Acc: 57.64% +2025-03-09 20:02:52,930 - train - INFO - Epoch: 63 | Test Loss: 1.212 | Test Acc: 59.46% +2025-03-09 20:02:53,093 - train - INFO - Epoch: 64 | Batch: 0 | Loss: 1.213 | Acc: 60.16% +2025-03-09 20:02:54,979 - train - INFO - Epoch: 64 | Batch: 100 | Loss: 1.238 | Acc: 58.62% +2025-03-09 20:02:56,924 - train - INFO - Epoch: 64 | Batch: 200 | Loss: 1.231 | Acc: 58.56% +2025-03-09 20:02:59,004 - train - INFO - Epoch: 64 | Batch: 300 | Loss: 1.235 | Acc: 58.42% +2025-03-09 20:03:02,027 - train - INFO - Epoch: 64 | Test Loss: 1.301 | Test Acc: 56.76% +2025-03-09 20:03:11,492 - train - INFO - Epoch: 65 | Batch: 0 | Loss: 1.440 | Acc: 59.38% +2025-03-09 20:03:13,589 - train - INFO - Epoch: 65 | Batch: 100 | Loss: 1.219 | Acc: 57.99% +2025-03-09 20:03:15,758 - train - INFO - Epoch: 65 | Batch: 200 | Loss: 1.214 | Acc: 58.48% +2025-03-09 20:03:17,811 - train - INFO - Epoch: 65 | Batch: 300 | Loss: 1.220 | Acc: 58.31% +2025-03-09 20:03:20,962 - train - INFO - Epoch: 65 | Test Loss: 1.133 | Test Acc: 62.39% +2025-03-09 20:03:21,151 - train - INFO - Epoch: 66 | Batch: 0 | Loss: 1.090 | Acc: 64.84% +2025-03-09 20:03:23,279 - train - INFO - Epoch: 66 | Batch: 100 | Loss: 1.212 | Acc: 58.54% +2025-03-09 20:03:25,314 - train - INFO - Epoch: 66 | Batch: 200 | Loss: 1.221 | Acc: 58.45% +2025-03-09 20:03:27,262 - train - INFO - Epoch: 66 | Batch: 300 | Loss: 1.220 | Acc: 58.67% +2025-03-09 20:03:30,250 - train - INFO - Epoch: 66 | Test Loss: 1.128 | Test Acc: 62.20% +2025-03-09 20:03:30,427 - train - INFO - Epoch: 67 | Batch: 0 | Loss: 1.194 | Acc: 62.50% +2025-03-09 20:03:32,409 - train - INFO - Epoch: 67 | Batch: 100 | Loss: 1.205 | Acc: 59.23% +2025-03-09 20:03:34,525 - train - INFO - Epoch: 67 | Batch: 200 | Loss: 1.219 | Acc: 58.96% +2025-03-09 20:03:36,669 - train - INFO - Epoch: 67 | Batch: 300 | Loss: 1.224 | Acc: 58.88% +2025-03-09 20:03:40,213 - train - INFO - Epoch: 67 | Test Loss: 1.198 | Test Acc: 60.10% +2025-03-09 20:03:40,438 - train - INFO - Epoch: 68 | Batch: 0 | Loss: 1.071 | Acc: 59.38% +2025-03-09 20:03:42,654 - train - INFO - Epoch: 68 | Batch: 100 | Loss: 1.195 | Acc: 59.75% +2025-03-09 20:03:44,633 - train - INFO - Epoch: 68 | Batch: 200 | Loss: 1.193 | Acc: 59.75% +2025-03-09 20:03:46,611 - train - INFO - Epoch: 68 | Batch: 300 | Loss: 1.193 | Acc: 59.72% +2025-03-09 20:03:49,635 - train - INFO - Epoch: 68 | Test Loss: 1.170 | Test Acc: 60.74% +2025-03-09 20:03:58,030 - train - INFO - Epoch: 69 | Batch: 0 | Loss: 1.065 | Acc: 57.81% +2025-03-09 20:04:00,061 - train - INFO 
- Epoch: 69 | Batch: 100 | Loss: 1.217 | Acc: 58.83% +2025-03-09 20:04:01,977 - train - INFO - Epoch: 69 | Batch: 200 | Loss: 1.226 | Acc: 58.59% +2025-03-09 20:04:03,999 - train - INFO - Epoch: 69 | Batch: 300 | Loss: 1.229 | Acc: 58.67% +2025-03-09 20:04:07,214 - train - INFO - Epoch: 69 | Test Loss: 1.170 | Test Acc: 61.05% +2025-03-09 20:04:07,420 - train - INFO - Epoch: 70 | Batch: 0 | Loss: 1.327 | Acc: 55.47% +2025-03-09 20:04:09,428 - train - INFO - Epoch: 70 | Batch: 100 | Loss: 1.212 | Acc: 59.03% +2025-03-09 20:04:11,490 - train - INFO - Epoch: 70 | Batch: 200 | Loss: 1.210 | Acc: 59.22% +2025-03-09 20:04:13,652 - train - INFO - Epoch: 70 | Batch: 300 | Loss: 1.201 | Acc: 59.55% +2025-03-09 20:04:16,600 - train - INFO - Epoch: 70 | Test Loss: 1.083 | Test Acc: 63.70% +2025-03-09 20:04:16,780 - train - INFO - Epoch: 71 | Batch: 0 | Loss: 1.090 | Acc: 64.84% +2025-03-09 20:04:18,739 - train - INFO - Epoch: 71 | Batch: 100 | Loss: 1.180 | Acc: 60.05% +2025-03-09 20:04:20,670 - train - INFO - Epoch: 71 | Batch: 200 | Loss: 1.185 | Acc: 59.98% +2025-03-09 20:04:22,543 - train - INFO - Epoch: 71 | Batch: 300 | Loss: 1.183 | Acc: 60.13% +2025-03-09 20:04:25,494 - train - INFO - Epoch: 71 | Test Loss: 1.122 | Test Acc: 63.08% +2025-03-09 20:04:25,672 - train - INFO - Epoch: 72 | Batch: 0 | Loss: 1.234 | Acc: 57.81% +2025-03-09 20:04:27,737 - train - INFO - Epoch: 72 | Batch: 100 | Loss: 1.229 | Acc: 58.80% +2025-03-09 20:04:29,724 - train - INFO - Epoch: 72 | Batch: 200 | Loss: 1.214 | Acc: 59.19% +2025-03-09 20:04:31,711 - train - INFO - Epoch: 72 | Batch: 300 | Loss: 1.205 | Acc: 59.47% +2025-03-09 20:04:35,205 - train - INFO - Epoch: 72 | Test Loss: 1.286 | Test Acc: 58.20% +2025-03-09 20:04:44,301 - train - INFO - Epoch: 73 | Batch: 0 | Loss: 1.294 | Acc: 57.81% +2025-03-09 20:04:46,184 - train - INFO - Epoch: 73 | Batch: 100 | Loss: 1.195 | Acc: 59.38% +2025-03-09 20:04:48,292 - train - INFO - Epoch: 73 | Batch: 200 | Loss: 1.186 | Acc: 59.92% +2025-03-09 20:04:50,218 - train - INFO - Epoch: 73 | Batch: 300 | Loss: 1.186 | Acc: 59.97% +2025-03-09 20:04:53,168 - train - INFO - Epoch: 73 | Test Loss: 1.130 | Test Acc: 62.16% +2025-03-09 20:04:53,370 - train - INFO - Epoch: 74 | Batch: 0 | Loss: 1.022 | Acc: 66.41% +2025-03-09 20:04:55,408 - train - INFO - Epoch: 74 | Batch: 100 | Loss: 1.219 | Acc: 58.83% +2025-03-09 20:04:57,367 - train - INFO - Epoch: 74 | Batch: 200 | Loss: 1.196 | Acc: 59.82% +2025-03-09 20:04:59,411 - train - INFO - Epoch: 74 | Batch: 300 | Loss: 1.184 | Acc: 60.10% +2025-03-09 20:05:02,780 - train - INFO - Epoch: 74 | Test Loss: 1.169 | Test Acc: 59.53% +2025-03-09 20:05:02,956 - train - INFO - Epoch: 75 | Batch: 0 | Loss: 1.042 | Acc: 64.06% +2025-03-09 20:05:04,928 - train - INFO - Epoch: 75 | Batch: 100 | Loss: 1.179 | Acc: 59.97% +2025-03-09 20:05:07,026 - train - INFO - Epoch: 75 | Batch: 200 | Loss: 1.164 | Acc: 60.59% +2025-03-09 20:05:09,052 - train - INFO - Epoch: 75 | Batch: 300 | Loss: 1.168 | Acc: 60.56% +2025-03-09 20:05:12,486 - train - INFO - Epoch: 75 | Test Loss: 1.086 | Test Acc: 64.61% +2025-03-09 20:05:12,668 - train - INFO - Epoch: 76 | Batch: 0 | Loss: 1.005 | Acc: 67.97% +2025-03-09 20:05:14,872 - train - INFO - Epoch: 76 | Batch: 100 | Loss: 1.188 | Acc: 59.54% +2025-03-09 20:05:17,020 - train - INFO - Epoch: 76 | Batch: 200 | Loss: 1.168 | Acc: 60.31% +2025-03-09 20:05:18,884 - train - INFO - Epoch: 76 | Batch: 300 | Loss: 1.171 | Acc: 60.22% +2025-03-09 20:05:21,857 - train - INFO - Epoch: 76 | Test Loss: 1.077 | Test Acc: 64.56% 
+2025-03-09 20:05:30,426 - train - INFO - Epoch: 77 | Batch: 0 | Loss: 0.993 | Acc: 67.97% +2025-03-09 20:05:32,481 - train - INFO - Epoch: 77 | Batch: 100 | Loss: 1.187 | Acc: 59.47% +2025-03-09 20:05:34,515 - train - INFO - Epoch: 77 | Batch: 200 | Loss: 1.162 | Acc: 60.40% +2025-03-09 20:05:36,603 - train - INFO - Epoch: 77 | Batch: 300 | Loss: 1.156 | Acc: 60.49% +2025-03-09 20:05:39,781 - train - INFO - Epoch: 77 | Test Loss: 1.148 | Test Acc: 61.77% +2025-03-09 20:05:39,985 - train - INFO - Epoch: 78 | Batch: 0 | Loss: 1.126 | Acc: 60.16% +2025-03-09 20:05:41,942 - train - INFO - Epoch: 78 | Batch: 100 | Loss: 1.135 | Acc: 61.81% +2025-03-09 20:05:43,948 - train - INFO - Epoch: 78 | Batch: 200 | Loss: 1.130 | Acc: 61.72% +2025-03-09 20:05:46,120 - train - INFO - Epoch: 78 | Batch: 300 | Loss: 1.144 | Acc: 61.18% +2025-03-09 20:05:49,313 - train - INFO - Epoch: 78 | Test Loss: 1.187 | Test Acc: 60.45% +2025-03-09 20:05:49,505 - train - INFO - Epoch: 79 | Batch: 0 | Loss: 1.205 | Acc: 60.94% +2025-03-09 20:05:51,476 - train - INFO - Epoch: 79 | Batch: 100 | Loss: 1.161 | Acc: 60.95% +2025-03-09 20:05:53,457 - train - INFO - Epoch: 79 | Batch: 200 | Loss: 1.157 | Acc: 61.07% +2025-03-09 20:05:55,418 - train - INFO - Epoch: 79 | Batch: 300 | Loss: 1.156 | Acc: 61.12% +2025-03-09 20:05:58,509 - train - INFO - Epoch: 79 | Test Loss: 1.056 | Test Acc: 63.68% +2025-03-09 20:05:58,686 - train - INFO - Epoch: 80 | Batch: 0 | Loss: 1.065 | Acc: 64.84% +2025-03-09 20:06:00,797 - train - INFO - Epoch: 80 | Batch: 100 | Loss: 1.121 | Acc: 62.57% +2025-03-09 20:06:02,773 - train - INFO - Epoch: 80 | Batch: 200 | Loss: 1.133 | Acc: 61.80% +2025-03-09 20:06:04,652 - train - INFO - Epoch: 80 | Batch: 300 | Loss: 1.145 | Acc: 61.24% +2025-03-09 20:06:07,896 - train - INFO - Epoch: 80 | Test Loss: 1.118 | Test Acc: 62.60% +2025-03-09 20:06:17,187 - train - INFO - Epoch: 81 | Batch: 0 | Loss: 0.981 | Acc: 64.06% +2025-03-09 20:06:19,096 - train - INFO - Epoch: 81 | Batch: 100 | Loss: 1.167 | Acc: 61.12% +2025-03-09 20:06:21,115 - train - INFO - Epoch: 81 | Batch: 200 | Loss: 1.154 | Acc: 61.34% +2025-03-09 20:06:23,095 - train - INFO - Epoch: 81 | Batch: 300 | Loss: 1.144 | Acc: 61.42% +2025-03-09 20:06:26,187 - train - INFO - Epoch: 81 | Test Loss: 1.173 | Test Acc: 60.44% +2025-03-09 20:06:26,358 - train - INFO - Epoch: 82 | Batch: 0 | Loss: 1.302 | Acc: 53.91% +2025-03-09 20:06:28,368 - train - INFO - Epoch: 82 | Batch: 100 | Loss: 1.138 | Acc: 61.95% +2025-03-09 20:06:30,300 - train - INFO - Epoch: 82 | Batch: 200 | Loss: 1.133 | Acc: 62.14% +2025-03-09 20:06:32,316 - train - INFO - Epoch: 82 | Batch: 300 | Loss: 1.134 | Acc: 62.13% +2025-03-09 20:06:35,449 - train - INFO - Epoch: 82 | Test Loss: 1.126 | Test Acc: 61.16% +2025-03-09 20:06:35,669 - train - INFO - Epoch: 83 | Batch: 0 | Loss: 1.332 | Acc: 53.12% +2025-03-09 20:06:37,637 - train - INFO - Epoch: 83 | Batch: 100 | Loss: 1.145 | Acc: 60.94% +2025-03-09 20:06:39,716 - train - INFO - Epoch: 83 | Batch: 200 | Loss: 1.138 | Acc: 61.66% +2025-03-09 20:06:41,791 - train - INFO - Epoch: 83 | Batch: 300 | Loss: 1.143 | Acc: 61.64% +2025-03-09 20:06:44,690 - train - INFO - Epoch: 83 | Test Loss: 1.201 | Test Acc: 61.08% +2025-03-09 20:06:44,856 - train - INFO - Epoch: 84 | Batch: 0 | Loss: 1.367 | Acc: 61.72% +2025-03-09 20:06:46,837 - train - INFO - Epoch: 84 | Batch: 100 | Loss: 1.157 | Acc: 61.66% +2025-03-09 20:06:48,817 - train - INFO - Epoch: 84 | Batch: 200 | Loss: 1.129 | Acc: 62.19% +2025-03-09 20:06:50,922 - train - INFO - Epoch: 84 | 
Batch: 300 | Loss: 1.124 | Acc: 62.21% +2025-03-09 20:06:54,051 - train - INFO - Epoch: 84 | Test Loss: 1.030 | Test Acc: 65.83% +2025-03-09 20:07:03,364 - train - INFO - Epoch: 85 | Batch: 0 | Loss: 0.997 | Acc: 66.41% +2025-03-09 20:07:06,169 - train - INFO - Epoch: 85 | Batch: 100 | Loss: 1.102 | Acc: 62.93% +2025-03-09 20:07:08,295 - train - INFO - Epoch: 85 | Batch: 200 | Loss: 1.099 | Acc: 62.92% +2025-03-09 20:07:10,316 - train - INFO - Epoch: 85 | Batch: 300 | Loss: 1.100 | Acc: 62.74% +2025-03-09 20:07:13,443 - train - INFO - Epoch: 85 | Test Loss: 0.993 | Test Acc: 66.24% +2025-03-09 20:07:13,613 - train - INFO - Epoch: 86 | Batch: 0 | Loss: 0.879 | Acc: 66.41% +2025-03-09 20:07:15,530 - train - INFO - Epoch: 86 | Batch: 100 | Loss: 1.070 | Acc: 63.68% +2025-03-09 20:07:17,620 - train - INFO - Epoch: 86 | Batch: 200 | Loss: 1.082 | Acc: 63.56% +2025-03-09 20:07:19,568 - train - INFO - Epoch: 86 | Batch: 300 | Loss: 1.095 | Acc: 63.20% +2025-03-09 20:07:22,857 - train - INFO - Epoch: 86 | Test Loss: 1.084 | Test Acc: 64.14% +2025-03-09 20:07:23,032 - train - INFO - Epoch: 87 | Batch: 0 | Loss: 1.100 | Acc: 59.38% +2025-03-09 20:07:25,120 - train - INFO - Epoch: 87 | Batch: 100 | Loss: 1.086 | Acc: 62.84% +2025-03-09 20:07:27,174 - train - INFO - Epoch: 87 | Batch: 200 | Loss: 1.096 | Acc: 62.75% +2025-03-09 20:07:29,361 - train - INFO - Epoch: 87 | Batch: 300 | Loss: 1.107 | Acc: 62.41% +2025-03-09 20:07:32,701 - train - INFO - Epoch: 87 | Test Loss: 1.017 | Test Acc: 66.01% +2025-03-09 20:07:32,864 - train - INFO - Epoch: 88 | Batch: 0 | Loss: 0.993 | Acc: 71.88% +2025-03-09 20:07:34,869 - train - INFO - Epoch: 88 | Batch: 100 | Loss: 1.064 | Acc: 64.05% +2025-03-09 20:07:36,860 - train - INFO - Epoch: 88 | Batch: 200 | Loss: 1.078 | Acc: 63.53% +2025-03-09 20:07:38,850 - train - INFO - Epoch: 88 | Batch: 300 | Loss: 1.093 | Acc: 63.05% +2025-03-09 20:07:41,861 - train - INFO - Epoch: 88 | Test Loss: 0.984 | Test Acc: 67.59% +2025-03-09 20:07:50,269 - train - INFO - Epoch: 89 | Batch: 0 | Loss: 1.033 | Acc: 64.06% +2025-03-09 20:07:52,348 - train - INFO - Epoch: 89 | Batch: 100 | Loss: 1.060 | Acc: 64.22% +2025-03-09 20:07:54,441 - train - INFO - Epoch: 89 | Batch: 200 | Loss: 1.060 | Acc: 64.06% +2025-03-09 20:07:56,483 - train - INFO - Epoch: 89 | Batch: 300 | Loss: 1.066 | Acc: 63.70% +2025-03-09 20:07:59,622 - train - INFO - Epoch: 89 | Test Loss: 1.052 | Test Acc: 65.01% +2025-03-09 20:07:59,816 - train - INFO - Epoch: 90 | Batch: 0 | Loss: 1.035 | Acc: 65.62% +2025-03-09 20:08:02,021 - train - INFO - Epoch: 90 | Batch: 100 | Loss: 1.072 | Acc: 64.50% +2025-03-09 20:08:04,109 - train - INFO - Epoch: 90 | Batch: 200 | Loss: 1.073 | Acc: 63.96% +2025-03-09 20:08:06,098 - train - INFO - Epoch: 90 | Batch: 300 | Loss: 1.079 | Acc: 63.72% +2025-03-09 20:08:09,147 - train - INFO - Epoch: 90 | Test Loss: 0.991 | Test Acc: 67.06% +2025-03-09 20:08:09,323 - train - INFO - Epoch: 91 | Batch: 0 | Loss: 1.019 | Acc: 66.41% +2025-03-09 20:08:11,288 - train - INFO - Epoch: 91 | Batch: 100 | Loss: 1.040 | Acc: 64.88% +2025-03-09 20:08:13,266 - train - INFO - Epoch: 91 | Batch: 200 | Loss: 1.043 | Acc: 64.61% +2025-03-09 20:08:15,248 - train - INFO - Epoch: 91 | Batch: 300 | Loss: 1.056 | Acc: 64.13% +2025-03-09 20:08:18,420 - train - INFO - Epoch: 91 | Test Loss: 1.099 | Test Acc: 63.08% +2025-03-09 20:08:18,624 - train - INFO - Epoch: 92 | Batch: 0 | Loss: 1.044 | Acc: 68.75% +2025-03-09 20:08:20,632 - train - INFO - Epoch: 92 | Batch: 100 | Loss: 1.055 | Acc: 64.34% +2025-03-09 
20:08:22,645 - train - INFO - Epoch: 92 | Batch: 200 | Loss: 1.051 | Acc: 64.52% +2025-03-09 20:08:24,710 - train - INFO - Epoch: 92 | Batch: 300 | Loss: 1.063 | Acc: 63.84% +2025-03-09 20:08:27,778 - train - INFO - Epoch: 92 | Test Loss: 1.023 | Test Acc: 65.02% +2025-03-09 20:08:36,292 - train - INFO - Epoch: 93 | Batch: 0 | Loss: 1.010 | Acc: 64.06% +2025-03-09 20:08:38,381 - train - INFO - Epoch: 93 | Batch: 100 | Loss: 1.084 | Acc: 63.50% +2025-03-09 20:08:40,403 - train - INFO - Epoch: 93 | Batch: 200 | Loss: 1.055 | Acc: 64.34% +2025-03-09 20:08:42,384 - train - INFO - Epoch: 93 | Batch: 300 | Loss: 1.055 | Acc: 64.37% +2025-03-09 20:08:45,302 - train - INFO - Epoch: 93 | Test Loss: 1.040 | Test Acc: 65.43% +2025-03-09 20:08:45,476 - train - INFO - Epoch: 94 | Batch: 0 | Loss: 1.013 | Acc: 66.41% +2025-03-09 20:08:47,512 - train - INFO - Epoch: 94 | Batch: 100 | Loss: 1.048 | Acc: 64.67% +2025-03-09 20:08:49,355 - train - INFO - Epoch: 94 | Batch: 200 | Loss: 1.034 | Acc: 64.90% +2025-03-09 20:08:51,320 - train - INFO - Epoch: 94 | Batch: 300 | Loss: 1.032 | Acc: 65.03% +2025-03-09 20:08:54,352 - train - INFO - Epoch: 94 | Test Loss: 1.007 | Test Acc: 65.77% +2025-03-09 20:08:54,532 - train - INFO - Epoch: 95 | Batch: 0 | Loss: 0.963 | Acc: 68.75% +2025-03-09 20:08:56,486 - train - INFO - Epoch: 95 | Batch: 100 | Loss: 1.044 | Acc: 64.52% +2025-03-09 20:08:58,522 - train - INFO - Epoch: 95 | Batch: 200 | Loss: 1.059 | Acc: 64.16% +2025-03-09 20:09:00,641 - train - INFO - Epoch: 95 | Batch: 300 | Loss: 1.046 | Acc: 64.58% +2025-03-09 20:09:03,741 - train - INFO - Epoch: 95 | Test Loss: 1.032 | Test Acc: 65.96% +2025-03-09 20:09:03,950 - train - INFO - Epoch: 96 | Batch: 0 | Loss: 0.983 | Acc: 65.62% +2025-03-09 20:09:05,855 - train - INFO - Epoch: 96 | Batch: 100 | Loss: 1.032 | Acc: 64.93% +2025-03-09 20:09:07,774 - train - INFO - Epoch: 96 | Batch: 200 | Loss: 1.017 | Acc: 65.41% +2025-03-09 20:09:09,825 - train - INFO - Epoch: 96 | Batch: 300 | Loss: 1.017 | Acc: 65.49% +2025-03-09 20:09:12,898 - train - INFO - Epoch: 96 | Test Loss: 1.003 | Test Acc: 65.83% +2025-03-09 20:09:21,539 - train - INFO - Epoch: 97 | Batch: 0 | Loss: 0.992 | Acc: 66.41% +2025-03-09 20:09:23,513 - train - INFO - Epoch: 97 | Batch: 100 | Loss: 1.008 | Acc: 65.87% +2025-03-09 20:09:25,738 - train - INFO - Epoch: 97 | Batch: 200 | Loss: 1.015 | Acc: 65.68% +2025-03-09 20:09:27,822 - train - INFO - Epoch: 97 | Batch: 300 | Loss: 1.024 | Acc: 65.47% +2025-03-09 20:09:31,131 - train - INFO - Epoch: 97 | Test Loss: 0.978 | Test Acc: 67.57% +2025-03-09 20:09:31,294 - train - INFO - Epoch: 98 | Batch: 0 | Loss: 0.893 | Acc: 67.97% +2025-03-09 20:09:33,256 - train - INFO - Epoch: 98 | Batch: 100 | Loss: 1.005 | Acc: 65.52% +2025-03-09 20:09:35,255 - train - INFO - Epoch: 98 | Batch: 200 | Loss: 1.003 | Acc: 65.77% +2025-03-09 20:09:37,222 - train - INFO - Epoch: 98 | Batch: 300 | Loss: 1.001 | Acc: 66.07% +2025-03-09 20:09:40,328 - train - INFO - Epoch: 98 | Test Loss: 0.932 | Test Acc: 68.67% +2025-03-09 20:09:40,520 - train - INFO - Epoch: 99 | Batch: 0 | Loss: 0.896 | Acc: 66.41% +2025-03-09 20:09:42,505 - train - INFO - Epoch: 99 | Batch: 100 | Loss: 1.014 | Acc: 65.79% +2025-03-09 20:09:44,453 - train - INFO - Epoch: 99 | Batch: 200 | Loss: 1.009 | Acc: 65.97% +2025-03-09 20:09:46,594 - train - INFO - Epoch: 99 | Batch: 300 | Loss: 1.004 | Acc: 65.94% +2025-03-09 20:09:49,671 - train - INFO - Epoch: 99 | Test Loss: 0.956 | Test Acc: 68.30% +2025-03-09 20:09:49,866 - train - INFO - Epoch: 100 | Batch: 0 | 
Loss: 0.916 | Acc: 69.53% +2025-03-09 20:09:52,093 - train - INFO - Epoch: 100 | Batch: 100 | Loss: 1.005 | Acc: 65.68% +2025-03-09 20:09:54,201 - train - INFO - Epoch: 100 | Batch: 200 | Loss: 0.994 | Acc: 66.23% +2025-03-09 20:09:56,269 - train - INFO - Epoch: 100 | Batch: 300 | Loss: 0.995 | Acc: 66.23% +2025-03-09 20:09:59,443 - train - INFO - Epoch: 100 | Test Loss: 1.019 | Test Acc: 66.05% +2025-03-09 20:10:08,141 - train - INFO - 训练完成! diff --git a/Image/AlexNet/code/train.py b/Image/AlexNet/code/train.py index 734904ee5f2da25617dcfebed038f688bc17c4ad..3cfbdd1f28d14a24276b422dd7d2879dff34e98c 100644 --- a/Image/AlexNet/code/train.py +++ b/Image/AlexNet/code/train.py @@ -24,7 +24,8 @@ def main(): lr=args.lr, device=f'cuda:{args.gpu}', save_dir='../model', - model_name='alexnet' + model_name='alexnet', + layer_name='conv3.2' ) elif args.train_type == '1': train_model_data_augmentation(model, epochs=args.epochs, lr=args.lr, device=f'cuda:{args.gpu}', diff --git a/Image/AlexNet/model/.gitkeep b/Image/AlexNet/model/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/Image/AlexNet/model/0/epoch1/embeddings.npy b/Image/AlexNet/model/0/epoch1/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..32de2c195e9f8f713463de5dad120a1ea7325972 --- /dev/null +++ b/Image/AlexNet/model/0/epoch1/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6821ced877c4142fb43869d0c012cc021a0b345481a956e674f1ca23cf562dd +size 102400128 diff --git a/Image/AlexNet/model/0/epoch1/subject_model.pth b/Image/AlexNet/model/0/epoch1/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..e7cef4d9925dd6a50be86d00894743d159815b3b --- /dev/null +++ b/Image/AlexNet/model/0/epoch1/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:627be8aeffac84936d668c55f53d81bde77a4adccb8cc7b9fb5298c24db2377c +size 504030 diff --git a/Image/AlexNet/model/0/epoch10/embeddings.npy b/Image/AlexNet/model/0/epoch10/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..9f396b3c97f44d5d381219a3fe2ee00ec4b8a261 --- /dev/null +++ b/Image/AlexNet/model/0/epoch10/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa2fc49313edc663ca2d2f5f30ad503e4bd3bd327ee79ef750c672dc99e14117 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch10/subject_model.pth b/Image/AlexNet/model/0/epoch10/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..5faf83c1fb3311c1475e9799e86891f9579abcdf --- /dev/null +++ b/Image/AlexNet/model/0/epoch10/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e7833a1f521ddeb7e339f405f5a9eabc1b72ae65e07cb04e7382b4157f3a524 +size 504030 diff --git a/Image/AlexNet/model/0/epoch11/embeddings.npy b/Image/AlexNet/model/0/epoch11/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..8c7ae5df5ebcbe2a35834a26caf4588c3c7f626e --- /dev/null +++ b/Image/AlexNet/model/0/epoch11/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c183185a4010aaa9b8d95d8b55c39d81d676d8cfe59bf7ab53d03f032d79a2e6 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch11/subject_model.pth b/Image/AlexNet/model/0/epoch11/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..e8667fb1e920ed4049e5c412124625dba1bc2099 --- 
/dev/null +++ b/Image/AlexNet/model/0/epoch11/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d9ebc99ef61325c7fc2d918b93213dff60b007e78ff8c0e6b742a3780376445 +size 504030 diff --git a/Image/AlexNet/model/0/epoch12/embeddings.npy b/Image/AlexNet/model/0/epoch12/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..c9db8c061fc51a2551f348a918799bcf7cda1332 --- /dev/null +++ b/Image/AlexNet/model/0/epoch12/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7feafa977c4b00e19952c97a3bfda6be0cfda676452ceb76dd90e4c46a0ec8c4 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch12/subject_model.pth b/Image/AlexNet/model/0/epoch12/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..c62872e8ccceba13cd21067961c086aea96a5f43 --- /dev/null +++ b/Image/AlexNet/model/0/epoch12/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c4a1d959572699b4c3d11ad82ea2c38c93c9d4fca4378ecf517d84290581d61 +size 504030 diff --git a/Image/AlexNet/model/0/epoch13/embeddings.npy b/Image/AlexNet/model/0/epoch13/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..e696e55eaedd5d65794a761275391e2ceaa7e0c1 --- /dev/null +++ b/Image/AlexNet/model/0/epoch13/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d168848025d937363ef62aee6e34687866f23066aaa833ae1ae5def57fa167ab +size 102400128 diff --git a/Image/AlexNet/model/0/epoch13/subject_model.pth b/Image/AlexNet/model/0/epoch13/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..6533f2bb08546b5073816319ab5c6bd39c6229f8 --- /dev/null +++ b/Image/AlexNet/model/0/epoch13/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:344989713106a9417acf7843ac665278f00ee668a27f6a86583304dc662eec55 +size 504030 diff --git a/Image/AlexNet/model/0/epoch14/embeddings.npy b/Image/AlexNet/model/0/epoch14/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..b9857ab3b124ac5378d929efca2c98ce3522c239 --- /dev/null +++ b/Image/AlexNet/model/0/epoch14/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1313f0b51793784f0f9dac212866d89e4d5100815bdfac64e478cd8294db245a +size 102400128 diff --git a/Image/AlexNet/model/0/epoch14/subject_model.pth b/Image/AlexNet/model/0/epoch14/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..1b2802211a7a6bed146498b5c6554fada829aeb5 --- /dev/null +++ b/Image/AlexNet/model/0/epoch14/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b602e19274664678c893a86a4ab009351ef30e1bdcc7b849e92c230ce1d3fbe +size 504030 diff --git a/Image/AlexNet/model/0/epoch15/embeddings.npy b/Image/AlexNet/model/0/epoch15/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..0af8c277be1647d6950fb2423fa327948315b5e0 --- /dev/null +++ b/Image/AlexNet/model/0/epoch15/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bae98bee14fde7e418d9170696bf1d58d7f6dc660029aa3ca398ebe36fe3a2b +size 102400128 diff --git a/Image/AlexNet/model/0/epoch15/subject_model.pth b/Image/AlexNet/model/0/epoch15/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..e5bff3194aa4a2fae3fcf16a80264a020c8e45ad --- /dev/null +++ b/Image/AlexNet/model/0/epoch15/subject_model.pth @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ac906242fc7fcf3d4caad304ceb0e513aaf9d07d9aa81b295e2e608e3770d92 +size 504030 diff --git a/Image/AlexNet/model/0/epoch16/embeddings.npy b/Image/AlexNet/model/0/epoch16/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..94bc81933d1dcbc41d9b58fd1fa3175e8a28a47d --- /dev/null +++ b/Image/AlexNet/model/0/epoch16/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18d92f53bf3de4f7990a36788dac7f2fe278e2a2fe910892c02927730bb4d157 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch16/subject_model.pth b/Image/AlexNet/model/0/epoch16/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..1b1bc6223fffcd70e8f2839d3493a81afd283daf --- /dev/null +++ b/Image/AlexNet/model/0/epoch16/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71e3dde3dd78fdefd0103332ccfed0f1f6caaef52591f400211db0c2a5f75159 +size 504030 diff --git a/Image/AlexNet/model/0/epoch17/embeddings.npy b/Image/AlexNet/model/0/epoch17/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..4dc8c0a80cd1e14ef2278a02585be8e1c5006bb1 --- /dev/null +++ b/Image/AlexNet/model/0/epoch17/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63153e8eaa2813364e8274eed93c798b53fd83d648b3a3cb02d614da4249c7a9 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch17/subject_model.pth b/Image/AlexNet/model/0/epoch17/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..52bc3ab169dbaf21d5cfa73f30e2a747de8fd166 --- /dev/null +++ b/Image/AlexNet/model/0/epoch17/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4763ff84b8d47ec9168335351c43235bb16db45eed50b88e4cfb20f7830ba15a +size 504030 diff --git a/Image/AlexNet/model/0/epoch18/embeddings.npy b/Image/AlexNet/model/0/epoch18/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..4c45f1a1a8dab714d4f55c72b538f3014fc50b9d --- /dev/null +++ b/Image/AlexNet/model/0/epoch18/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ab17898773f2697e0d27efb3e001cdb36b16d1e50c4120254a7516484fdb852 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch18/subject_model.pth b/Image/AlexNet/model/0/epoch18/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..3a2de378eff7aec17d7137b513d23572e10cacab --- /dev/null +++ b/Image/AlexNet/model/0/epoch18/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:890aa5282eceb12688bb2962d3efcb183f0ea9133763edfa4538c795f70a4f35 +size 504030 diff --git a/Image/AlexNet/model/0/epoch19/embeddings.npy b/Image/AlexNet/model/0/epoch19/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..0b5e53f8472676b068429798e0205ec6d1158af7 --- /dev/null +++ b/Image/AlexNet/model/0/epoch19/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e216289ef89446d6036cb2c43fbdb6c422fb3a4494a266f37225e68ca9a212e +size 102400128 diff --git a/Image/AlexNet/model/0/epoch19/subject_model.pth b/Image/AlexNet/model/0/epoch19/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..817f94e1d5a28f143844fe48604c35de701fe3ab --- /dev/null +++ b/Image/AlexNet/model/0/epoch19/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4654d5eb2f0c48ccfc9401b39df7ccb1b05b71ee2fff4b82324b4fa36c06295a +size 504030 diff --git a/Image/AlexNet/model/0/epoch2/embeddings.npy b/Image/AlexNet/model/0/epoch2/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..aa6e0d410f45ae9fe14684d0f1671463bd2f88f5 --- /dev/null +++ b/Image/AlexNet/model/0/epoch2/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdd4967f08295d1a6cb6f754f2bde1d184ff98a5ee53d0927d516022c68b6e74 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch2/subject_model.pth b/Image/AlexNet/model/0/epoch2/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..fbd7883fcaed34faa3dbb61ef9ac30509e957035 --- /dev/null +++ b/Image/AlexNet/model/0/epoch2/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4eda7982c92730cd281f998564592591d9ecbdaec5872cad7e48d1016a699cdf +size 504030 diff --git a/Image/AlexNet/model/0/epoch20/embeddings.npy b/Image/AlexNet/model/0/epoch20/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..b1b07dcf79f697c89d4763d7131f2c30cecd6470 --- /dev/null +++ b/Image/AlexNet/model/0/epoch20/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e24f5e27e0af90a5e05d219ac1e8b4145ff09891b9a96c4cdc7db888cb301da5 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch20/subject_model.pth b/Image/AlexNet/model/0/epoch20/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..092296d0dcf1a867d887fd1bea922da66b936319 --- /dev/null +++ b/Image/AlexNet/model/0/epoch20/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa5a282151c770473b2d2fba2d2d93bf7b0df419645752967f2847a66d69ba20 +size 504030 diff --git a/Image/AlexNet/model/0/epoch21/embeddings.npy b/Image/AlexNet/model/0/epoch21/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..f36ef79e195be407c9bae95a32c81f8765d090e5 --- /dev/null +++ b/Image/AlexNet/model/0/epoch21/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0cca779bd795f02b5dcfadbc0d94f242ba3b71a9595a9ecddd25dfe382809a8 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch21/subject_model.pth b/Image/AlexNet/model/0/epoch21/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..99dea18de91f2c597fc13c00c7de6d31f540609c --- /dev/null +++ b/Image/AlexNet/model/0/epoch21/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f75c8cd907e636860b42eb9e48c255275b35ffea8d796588e98933fbc63407e6 +size 504030 diff --git a/Image/AlexNet/model/0/epoch22/embeddings.npy b/Image/AlexNet/model/0/epoch22/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..45f789f784b8acf37090e53518a10950dcf58457 --- /dev/null +++ b/Image/AlexNet/model/0/epoch22/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:102862fc18fb57c1b80f5567edf3184b57f47941166871cf79224916ace6bfa9 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch22/subject_model.pth b/Image/AlexNet/model/0/epoch22/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..9818ef3f13345603b862dcada17a3bbf717c95b1 --- /dev/null +++ b/Image/AlexNet/model/0/epoch22/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83373d9958fa6dfea29a706fc915adff916d27ea4df4a43374a9d1b65ef10631 
+size 504030 diff --git a/Image/AlexNet/model/0/epoch23/embeddings.npy b/Image/AlexNet/model/0/epoch23/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..989f544120aa93a9582c6840b0751a0932e721ad --- /dev/null +++ b/Image/AlexNet/model/0/epoch23/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f727d422d39de56d9ae9a3c702a1de543e51f2cfaa8f1b1e8c14e114f96380a8 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch23/subject_model.pth b/Image/AlexNet/model/0/epoch23/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..3d79c4d3a0ad03ccdb911fba514d4af47563ea12 --- /dev/null +++ b/Image/AlexNet/model/0/epoch23/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2783b7549f6773cf6795b6b5289aab01b6bc3435a5abf76058903609a26e1500 +size 504030 diff --git a/Image/AlexNet/model/0/epoch24/embeddings.npy b/Image/AlexNet/model/0/epoch24/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..0487f6b4abd4072c3db16ba368d246fd77da8b3d --- /dev/null +++ b/Image/AlexNet/model/0/epoch24/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:616994b0ca2995e7414d9f115dbf59d90beed0094dd7300e54c7ea55710ec4e3 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch24/subject_model.pth b/Image/AlexNet/model/0/epoch24/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..ebb0e2bed3d1e76995436422ce29090d76c1e229 --- /dev/null +++ b/Image/AlexNet/model/0/epoch24/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9901c77a9a23683436f90a0445550597ba5d4dcb3614b90ddf13ef3d7ba626a4 +size 504030 diff --git a/Image/AlexNet/model/0/epoch25/embeddings.npy b/Image/AlexNet/model/0/epoch25/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..d4f8372393c593aeab66b55547a9c62d2096c447 --- /dev/null +++ b/Image/AlexNet/model/0/epoch25/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58f45051955a9872fd6be62caf1459b0c0c1f78516a925037c57b163257aa73c +size 102400128 diff --git a/Image/AlexNet/model/0/epoch25/subject_model.pth b/Image/AlexNet/model/0/epoch25/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..ad213cdb671242e985a29efdcbc8582290fccfa9 --- /dev/null +++ b/Image/AlexNet/model/0/epoch25/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1490c924fed9da15f400a73abb5a4592c807277997de473b9075b4f9c2cc178e +size 504030 diff --git a/Image/AlexNet/model/0/epoch3/embeddings.npy b/Image/AlexNet/model/0/epoch3/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..394ae67064fd9ef9578a95cd81bee85b9634f7e8 --- /dev/null +++ b/Image/AlexNet/model/0/epoch3/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e64bef991238315064614a7a4e9c0654b361466b76db179d51bd6983c91f21d +size 102400128 diff --git a/Image/AlexNet/model/0/epoch3/subject_model.pth b/Image/AlexNet/model/0/epoch3/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..833089e77e20d9dea5dd6ae8de36cb82b01f4a8b --- /dev/null +++ b/Image/AlexNet/model/0/epoch3/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e071afd5db9bf4992a8456350cc14f7448f72e9c8bd2633af60f4be2c46544e4 +size 504030 diff --git a/Image/AlexNet/model/0/epoch4/embeddings.npy 
b/Image/AlexNet/model/0/epoch4/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..a293f6f2871381462ebac6f921c8fd527b69a3bd --- /dev/null +++ b/Image/AlexNet/model/0/epoch4/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3bed11a3e5243cb7885899e78d3516d2b1be888964ec55dc9941e2c6536c275 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch4/subject_model.pth b/Image/AlexNet/model/0/epoch4/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..c6c0667da8ada91d45de4ef302b5bbfc536d4c5d --- /dev/null +++ b/Image/AlexNet/model/0/epoch4/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1601ea4d88a2e8c8b91d41260caaf1e0aa5ff2f3c7eec70c78834cbb18e5b09d +size 504030 diff --git a/Image/AlexNet/model/0/epoch5/embeddings.npy b/Image/AlexNet/model/0/epoch5/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..decb313a8705008c1a55c1a70037342266fcc0b7 --- /dev/null +++ b/Image/AlexNet/model/0/epoch5/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:113a78b111dcfed47bc133ab4c6a8938edb9d2c4602a46503f51ce1a5bb2bc4c +size 102400128 diff --git a/Image/AlexNet/model/0/epoch5/subject_model.pth b/Image/AlexNet/model/0/epoch5/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..a96a1b662a8b00aca909cff2368c722b2359bf38 --- /dev/null +++ b/Image/AlexNet/model/0/epoch5/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb5adc11bb4ba0b31461c0aadb7360d06dc1aec22e76d7edcc34058d4d3e8ed6 +size 504030 diff --git a/Image/AlexNet/model/0/epoch6/embeddings.npy b/Image/AlexNet/model/0/epoch6/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..3d5654dcde2667bf87447b01c10ff798fb5b2834 --- /dev/null +++ b/Image/AlexNet/model/0/epoch6/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf1b170704da827cdb96b00a47738201319d6396ce5077fd7458b922dc72a624 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch6/subject_model.pth b/Image/AlexNet/model/0/epoch6/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..56455eab0c78ad08f86a5ba35fe243f354bb7d1d --- /dev/null +++ b/Image/AlexNet/model/0/epoch6/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:414850c2aa0fd43cb68e0e6f0ac974151eb4006ec4af09541c11e0bd1494ba5e +size 504030 diff --git a/Image/AlexNet/model/0/epoch7/embeddings.npy b/Image/AlexNet/model/0/epoch7/embeddings.npy new file mode 100644 index 0000000000000000000000000000000000000000..b452f092aae27c248ae44b5815960b36930f321e --- /dev/null +++ b/Image/AlexNet/model/0/epoch7/embeddings.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d43dd716702b8098e98f1933cda05a3825a7bdc2749456005487f861fb0d4f94 +size 102400128 diff --git a/Image/AlexNet/model/0/epoch7/subject_model.pth b/Image/AlexNet/model/0/epoch7/subject_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..d3c797ec9da16fae76b37b65eb4ea3cc445afdec --- /dev/null +++ b/Image/AlexNet/model/0/epoch7/subject_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d44f638aa233cfa2811bcac1c769e25686b4fa3543439c951a2fb4594d3d8d1d +size 504030 diff --git a/Image/AlexNet/model/0/epoch8/embeddings.npy b/Image/AlexNet/model/0/epoch8/embeddings.npy new file mode 100644 index 
0000000000000000000000000000000000000000..a88e0c6942117969224a32ff844124dc666160c1
--- /dev/null
+++ b/Image/AlexNet/model/0/epoch8/embeddings.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db67568518425df4f5704944358a6a60ae14ababca456ec88b4c0532769bf863
+size 102400128
diff --git a/Image/AlexNet/model/0/epoch8/subject_model.pth b/Image/AlexNet/model/0/epoch8/subject_model.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a9777fc297215714abf8d9afbe13b72f46d7aecd
--- /dev/null
+++ b/Image/AlexNet/model/0/epoch8/subject_model.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:073ed1ed2591e3413531afab86e3a5d27bf33da91972a0fc898e955030a9ea5b
+size 504030
diff --git a/Image/AlexNet/model/0/epoch9/embeddings.npy b/Image/AlexNet/model/0/epoch9/embeddings.npy
new file mode 100644
index 0000000000000000000000000000000000000000..ca5d1b7b889edf33ae03c0ca79199df06c2467ac
--- /dev/null
+++ b/Image/AlexNet/model/0/epoch9/embeddings.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0bf7d311b98757d49bf993a2f2903f6a2329a1065fa9f8f2f81a7811b01b4b2
+size 102400128
diff --git a/Image/AlexNet/model/0/epoch9/subject_model.pth b/Image/AlexNet/model/0/epoch9/subject_model.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5018cb15e6503691baec8edde1662cd22688d6a2
--- /dev/null
+++ b/Image/AlexNet/model/0/epoch9/subject_model.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27b8112eda4c12d9d438b35f36062dad8165b67b7086aeb830baa4ffc76749d6
+size 504030
diff --git a/Image/AlexNet/model/0/layer_info.json b/Image/AlexNet/model/0/layer_info.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d2c1d57f5992dcdc082fbfa7b156fdd3fa25201
--- /dev/null
+++ b/Image/AlexNet/model/0/layer_info.json
@@ -0,0 +1 @@
+{"layer_id": "conv3.2", "dim": 512}
\ No newline at end of file
diff --git a/Image/utils/train_utils.py b/Image/utils/train_utils.py
index 0c588c6bbdf06689dd9fe624608334f78785a3c0..15f21bb3958dec205ed7ea511bf19cea9614e1f7 100644
--- a/Image/utils/train_utils.py
+++ b/Image/utils/train_utils.py
@@ -63,7 +63,7 @@ def setup_logger(log_file):
     return logger
 
 def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
-                save_dir='./checkpoints', model_name='model', save_type='0'):
+                save_dir='./checkpoints', model_name='model', save_type='0',layer_name=None):
     """通用的模型训练函数
     Args:
         model: 要训练的模型
@@ -190,7 +190,7 @@ def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda
                    f'Test Acc: {acc:.2f}%')
 
         # 每5个epoch保存一次
-        if (epoch + 1) % 5 == 0:
+        if (epoch + 1) % 4 == 0:
             # 创建一个专门用于收集embedding的顺序dataloader
             ordered_loader = torch.utils.data.DataLoader(
                 trainloader.dataset,  # 使用相同的数据集
@@ -198,7 +198,7 @@ def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda
                 shuffle=False,  # 确保顺序加载
                 num_workers=trainloader.num_workers
             )
-            save_model = time_travel_saver(model, ordered_loader, device, save_dir, model_name, interval = 1, auto_save_embedding = True)
+            save_model = time_travel_saver(model, ordered_loader, device, save_dir, model_name, interval = 1, auto_save_embedding = True, layer_name = layer_name)
             save_model.save()
 
     scheduler.step()
diff --git a/ttv_utils/save_embeddings.py b/ttv_utils/save_embeddings.py
index 9ac1e870c68364993805e2d7f375ede5d0238aa0..be151dbf4b8e4a9bb8a02712909501e8dc9d94da 100644
--- a/ttv_utils/save_embeddings.py
+++ b/ttv_utils/save_embeddings.py
@@ -15,7 +15,8 @@ class time_travel_saver:
     4. 标签数据 (label/labels.npy)
     """
 
-    def __init__(self, model, dataloader, device, save_dir, model_name, interval=1, auto_save_embedding=False):
+    def __init__(self, model, dataloader, device, save_dir, model_name, interval=1,
+                 auto_save_embedding=False, layer_name=None):
         """初始化
 
         Args:
@@ -33,31 +34,14 @@ class time_travel_saver:
         self.model_name = model_name
         self.interval = interval
         self.auto_save = auto_save_embedding
-
-        # 创建保存目录结构
-        self.model_dir = os.path.join(save_dir, 'model')
-        self.dataset_dir = os.path.join(save_dir, 'dataset')
-        self.repr_dir = os.path.join(self.dataset_dir, 'representation')
-        self.pred_dir = os.path.join(self.dataset_dir, 'prediction')
-        self.label_dir = os.path.join(self.dataset_dir, 'label')
-
-        for dir_path in [self.model_dir, self.repr_dir, self.pred_dir, self.label_dir]:
-            os.makedirs(dir_path, exist_ok=True)
-
+        self.layer_name = layer_name
         # 获取当前epoch
-        self.current_epoch = len(os.listdir(self.model_dir)) + 1
+        if len(os.listdir(self.save_dir)) == 0:
+            self.current_epoch = 1
+        else:
+            self.current_epoch = len(os.listdir(self.save_dir))
 
-        # 保存标签(只在第一次保存)
-        if not os.path.exists(os.path.join(self.label_dir, 'labels.npy')):
-            self._save_labels()
-
-    def _save_labels(self):
-        """保存数据集标签"""
-        labels = []
-        for _, targets in self.dataloader:
-            labels.extend(targets.numpy())
-        np.save(os.path.join(self.label_dir, 'labels.npy'), np.array(labels))
-
+
     def _extract_features_and_predictions(self):
         """提取特征和预测结果
 
@@ -77,6 +61,8 @@ class time_travel_saver:
                 activation[name] = output.detach()
             return hook
 
+        # 根据层的名称或维度来选择层
+
         # 注册钩子到所有层
         handles = []
         for name, module in self.model.named_modules():
@@ -90,32 +76,45 @@ class time_travel_saver:
             inputs = inputs.to(self.device)
             _ = self.model(inputs)
 
-        # 找到维度在512-1024范围内的层
-        target_dim_range = (256, 2048)
-        suitable_layer_name = None
-        suitable_dim = None
-
-        # 分析所有层的输出维度
-        for name, feat in activation.items():
+        # 如果指定了层名,则直接使用该层
+        if self.layer_name is not None:
+            if self.layer_name not in activation:
+                raise ValueError(f"指定的层 {self.layer_name} 不存在于模型中")
+
+            feat = activation[self.layer_name]
             if feat is None:
-                continue
-            feat_dim = feat.view(feat.size(0), -1).size(1)
-            if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
-                suitable_layer_name = name
-                suitable_dim = feat_dim
-                break
-
-        if suitable_layer_name is None:
-            raise ValueError("没有找到合适维度的特征层")
+                raise ValueError(f"指定的层 {self.layer_name} 没有输出特征")
+
+            suitable_layer_name = self.layer_name
+            suitable_dim = feat.view(feat.size(0), -1).size(1)
+            print(f"使用指定的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
+        else:
+            # 找到维度在指定范围内的层
+            target_dim_range = (256, 2048)
+            suitable_layer_name = None
+            suitable_dim = None
 
-        print(f"选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
+            # 分析所有层的输出维度
+            for name, feat in activation.items():
+                if feat is None:
+                    continue
+                feat_dim = feat.view(feat.size(0), -1).size(1)
+                if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
+                    suitable_layer_name = name
+                    suitable_dim = feat_dim
+                    break
+
+            if suitable_layer_name is None:
+                raise ValueError("没有找到合适维度的特征层")
+
+            print(f"自动选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
 
         # 保存层信息
         layer_info = {
             'layer_id': suitable_layer_name,
             'dim': suitable_dim
         }
-        layer_info_path = os.path.join(self.dataset_dir, 'layer_info.json')
+        layer_info_path = os.path.join(self.save_dir, 'layer_info.json')
         with open(layer_info_path, 'w') as f:
             json.dump(layer_info, f)
@@ -132,14 +131,6 @@ class time_travel_saver:
                 flat_features = torch.flatten(feat, start_dim=1)
                 features.append(flat_features.cpu().numpy())
 
-                # 保存预测结果
-                predictions.append(outputs.cpu().numpy())
-
-                # 保存索引
-                indices.extend(range(batch_idx * self.dataloader.batch_size,
-                                     min((batch_idx + 1) * self.dataloader.batch_size,
-                                         len(self.dataloader.dataset))))
-
                 # 清除本次的激活值
                 activation.clear()
@@ -149,31 +140,27 @@ class time_travel_saver:
         if len(features) > 0:
             features = np.vstack(features)
-            predictions = np.vstack(predictions)
-            return features, predictions
+            return features
         else:
-            return np.array([]), np.array([])
+            return np.array([])
 
     def save(self, model = None):
         """保存所有数据"""
         if model is not None:
             self.model = model
 
         # 保存模型权重
-        model_path = os.path.join(self.model_dir, f'{self.current_epoch}.pth')
+        os.makedirs(os.path.join(self.save_dir, f'epoch{self.current_epoch}'), exist_ok=True)
+        model_path = os.path.join(self.save_dir, f'epoch{self.current_epoch}', 'subject_model.pth')
         torch.save(self.model.state_dict(), model_path)
 
         if self.auto_save:
             # 提取并保存特征和预测结果
-            features, predictions = self._extract_features_and_predictions()
+            features = self._extract_features_and_predictions()
 
             # 保存特征
-            np.save(os.path.join(self.repr_dir, f'{self.current_epoch}.npy'), features)
-
-            # 保存预测结果
-            np.save(os.path.join(self.pred_dir, f'{self.current_epoch}.npy'), predictions)
-
+            np.save(os.path.join(self.save_dir, f'epoch{self.current_epoch}', 'embeddings.npy'), features)
+
             print(f"Epoch {self.current_epoch * self.interval} 的数据已保存:")
             print(f"- 模型权重: {model_path}")
             print(f"- 特征向量: [样本数: {features.shape[0]}, 特征维度: {features.shape[1]}]")
-            print(f"- 预测结果: [样本数: {predictions.shape[0]}, 类别数: {predictions.shape[1]}]")
-            print(f"Epoch {self.current_epoch * self.interval} 的数据已保存")
\ No newline at end of file
+            print(f"Epoch {self.current_epoch} 的数据已保存")
\ No newline at end of file
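The train_utils.py change only threads a new layer_name argument from train_model into time_travel_saver and tightens the checkpoint interval from every 5 to every 4 epochs. Below is a call-site sketch of how the argument might be used: the train_model signature and the 'conv3.2' layer id come from this diff, while the AlexNet import path, the CIFAR-10 loaders and the transform are illustrative assumptions, not code from the repository.

import torch
import torchvision
import torchvision.transforms as transforms

from Image.utils.train_utils import train_model
from Image.AlexNet.code.model import AlexNet  # hypothetical import path, not shown in this diff

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)

train_model(
    AlexNet(num_classes=10), trainloader, testloader,
    epochs=100, lr=0.1, device='cuda:2',
    save_dir='./Image/AlexNet/model', model_name='alexnet', save_type='0',
    layer_name='conv3.2',  # pin the embedding layer; None falls back to dimension-based auto-detection
)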
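The save_embeddings.py rewrite is easier to see in isolation. The following is a minimal, self-contained sketch of the new layer_name path: the time_travel_saver constructor and save() behaviour come from this diff, while the toy model, the random data and the layer name 'conv2' are stand-ins, and it assumes the saver's hook registration (not shown in the hunks) covers plain Conv2d modules, as the auto-detection branch suggests. Note that the new __init__ calls os.listdir(save_dir) immediately, so the directory must exist before the saver is constructed.

import os

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

from ttv_utils.save_embeddings import time_travel_saver


class ToyNet(nn.Module):
    def __init__(self, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)  # named module -> activation key 'conv2'
        self.pool = nn.AdaptiveAvgPool2d(4)
        self.fc = nn.Linear(32 * 4 * 4, num_classes)

    def forward(self, x):
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        return self.fc(torch.flatten(self.pool(x), 1))


device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyNet().to(device)

# Ordered loader, mirroring the shuffle=False loader built in train_utils.py
dataset = TensorDataset(torch.randn(64, 3, 32, 32), torch.randint(0, 10, (64,)))
ordered_loader = DataLoader(dataset, batch_size=32, shuffle=False)

save_dir = './checkpoints/demo'
os.makedirs(save_dir, exist_ok=True)  # the new __init__ lists save_dir to infer the current epoch

saver = time_travel_saver(
    model, ordered_loader, device, save_dir, 'toynet',
    interval=1, auto_save_embedding=True,
    layer_name='conv2',  # bypass the (256, 2048)-dim auto-detection and hook this layer directly
)
saver.save()  # expected to write epoch1/subject_model.pth, epoch1/embeddings.npy and layer_info.json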
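The LFS objects added above spell out the resulting on-disk layout: one epoch{N} directory per saved checkpoint containing subject_model.pth and embeddings.npy, plus a single layer_info.json beside them. The 102400128-byte embeddings files are consistent with 50000 x 512 float32 values plus the .npy header, which would match one 512-dimensional vector per training sample of a 50k-image dataset such as CIFAR-10. A small loading sketch, assuming only that layout:

import json
import os

import numpy as np
import torch

save_dir = 'Image/AlexNet/model/0'

with open(os.path.join(save_dir, 'layer_info.json')) as f:
    layer_info = json.load(f)  # {"layer_id": "conv3.2", "dim": 512} in this commit

epoch_dir = os.path.join(save_dir, 'epoch20')
embeddings = np.load(os.path.join(epoch_dir, 'embeddings.npy'))
state_dict = torch.load(os.path.join(epoch_dir, 'subject_model.pth'), map_location='cpu')

print(layer_info['layer_id'], layer_info['dim'])
print(embeddings.shape)            # expected (num_train_samples, layer_info['dim'])
print(len(state_dict), 'tensors in the checkpoint')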