atiwari751 committed · Commit 089d70c · unverified · 2 parents: 3352589 5479df9

Merge pull request #3 from shrits-ai/main

data_utils.py CHANGED
@@ -8,7 +8,7 @@ def get_train_transform():
     return A.Compose([
         A.RandomResizedCrop(height=224, width=224, scale=(0.08, 1.0), ratio=(3/4, 4/3), p=1.0),
         A.HorizontalFlip(p=0.5),
-        A.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.8),
+        A.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05, p=0.5),
         A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
         ToTensorV2()
     ])
@@ -28,4 +28,4 @@ def get_data_loaders(train_transform, test_transform, batch_size_train=128, batc
     testset = datasets.ImageFolder(root='/mnt/imagenet/ILSVRC/Data/CLS-LOC/val', transform=lambda img: test_transform(image=np.array(img))['image'])
     testloader = DataLoader(testset, batch_size=batch_size_test, shuffle=False, num_workers=8, pin_memory=True)
 
-    return trainloader, testloader
+    return trainloader, testloader
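The merge softens the color augmentation: jitter ranges drop from 0.4 to 0.2, hue from 0.1 to 0.05, and the apply probability from 0.8 to 0.5. For context, here is a minimal sketch of how an Albumentations pipeline like this plugs into torchvision's ImageFolder, generalizing the inline lambda above into a named adapter; the AlbumentationsAdapter class and the '/path/to/train' root are illustrative, not part of the repo.

import numpy as np
import albumentations as A
from albumentations.pytorch import ToTensorV2
from torchvision import datasets

class AlbumentationsAdapter:
    """Adapts an Albumentations Compose to torchvision's PIL-in / tensor-out interface."""
    def __init__(self, transform):
        self.transform = transform

    def __call__(self, img):
        # Albumentations takes numpy arrays via the image= keyword and returns a dict
        return self.transform(image=np.array(img))['image']

train_transform = A.Compose([
    A.RandomResizedCrop(height=224, width=224, scale=(0.08, 1.0), ratio=(3/4, 4/3), p=1.0),
    A.HorizontalFlip(p=0.5),
    A.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05, p=0.5),
    A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ToTensorV2(),
])

# '/path/to/train' is a placeholder; this repo mounts ImageNet under /mnt/imagenet
trainset = datasets.ImageFolder(root='/path/to/train', transform=AlbumentationsAdapter(train_transform))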
lr_finder.py ADDED
@@ -0,0 +1,48 @@
+import torch
+import torch.optim as optim
+import torch.nn as nn
+from torch.optim.lr_scheduler import OneCycleLR
+from torchvision import models, datasets, transforms
+from torch.utils.data import DataLoader
+
+# Load pretrained ResNet-50
+model = models.resnet50(pretrained=True)
+model.fc = nn.Linear(model.fc.in_features, num_classes)  # Adjust for your dataset
+model = model.to('cuda')
+
+# Define optimizer and loss function
+optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-4)
+criterion = nn.CrossEntropyLoss()
+
+# Prepare dataset and DataLoader
+transform = transforms.Compose([
+    transforms.Resize(256),
+    transforms.CenterCrop(224),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+])
+train_dataset = datasets.ImageFolder(root='/path/to/train', transform=transform)
+train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=4)
+
+# Set One-Cycle LR scheduler
+epochs = 10
+steps_per_epoch = len(train_loader)
+lr_max = 1e-3  # Adjust based on LR Finder or task size
+
+scheduler = OneCycleLR(optimizer, max_lr=lr_max, epochs=epochs, steps_per_epoch=steps_per_epoch)
+
+# Training loop
+for epoch in range(epochs):
+    model.train()
+    for inputs, labels in train_loader:
+        inputs, labels = inputs.to('cuda'), labels.to('cuda')
+
+        optimizer.zero_grad()
+        outputs = model(inputs)
+        loss = criterion(outputs, labels)
+        loss.backward()
+        optimizer.step()
+        scheduler.step()  # Update learning rate using One-Cycle policy
+
+    print(f"Epoch {epoch+1}/{epochs} completed.")
+
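One caveat in the new script: num_classes is used when replacing model.fc but is never defined, so the file raises a NameError as committed. A minimal fix, assuming the ImageNet-1k setup the rest of the repo targets:

num_classes = 1000  # assumption: ImageNet-1k, matching the ILSVRC data used in data_utils.py
model.fc = nn.Linear(model.fc.in_features, num_classes)

Note also that this script calls scheduler.step() once per batch, which is what OneCycleLR expects; the main.py change below handles this differently.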
main.py CHANGED
@@ -5,13 +5,19 @@ from resnet_model import ResNet50
 from data_utils import get_train_transform, get_test_transform, get_data_loaders
 from train_test import train, test
 from utils import save_checkpoint, load_checkpoint, plot_training_curves, plot_misclassified_samples
+from torchsummary import summary
+from torch.optim.lr_scheduler import OneCycleLR
 
 def main():
     # Initialize model, loss function, and optimizer
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    model = ResNet50().to(device)
+    model = ResNet50()
+    model = torch.nn.DataParallel(model)
+    model = model.to(device)
+    summary(model, input_size=(3, 224, 224))
     criterion = nn.CrossEntropyLoss()
-    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-4)
+
 
     # Load data
     train_transform = get_train_transform()
@@ -30,8 +36,16 @@ def main():
     results = []
     learning_rates = []
 
+    # Set One-Cycle LR scheduler
+    num_epochs = 10
+    steps_per_epoch = len(trainloader)
+    lr_max = 1e-2
+
+    scheduler = OneCycleLR(optimizer, max_lr=lr_max, epochs=num_epochs, steps_per_epoch=steps_per_epoch)
+
+
     # Training loop
-    for epoch in range(start_epoch, 26):
+    for epoch in range(start_epoch+1, start_epoch + num_epochs):
         train_accuracy1, train_accuracy5, train_loss = train(model, device, trainloader, optimizer, criterion, epoch)
         test_accuracy1, test_accuracy5, test_loss, misclassified_images, misclassified_labels, misclassified_preds = test(model, device, testloader, criterion)
         print(f'Epoch {epoch} | Train Top-1 Acc: {train_accuracy1:.2f} | Test Top-1 Acc: {test_accuracy1:.2f}')
@@ -39,7 +53,8 @@ def main():
         # Append results for this epoch
         results.append((epoch, train_accuracy1, train_accuracy5, test_accuracy1, test_accuracy5, train_loss, test_loss))
         learning_rates.append(optimizer.param_groups[0]['lr'])
-
+
+        scheduler.step()
         # Save checkpoint
         save_checkpoint(model, optimizer, epoch, test_loss, checkpoint_path)
 
@@ -56,7 +71,9 @@ def main():
     plot_training_curves(epochs, train_acc1, test_acc1, train_acc5, test_acc5, train_losses, test_losses, learning_rates)
 
     # Plot misclassified samples
+    '''
     plot_misclassified_samples(misclassified_images, misclassified_labels, misclassified_preds, classes=['class1', 'class2', ...])  # Replace with actual class names
+    '''
 
 if __name__ == '__main__':
-    main()
+    main()
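A caveat worth flagging: the scheduler is constructed with steps_per_epoch=len(trainloader), which tells OneCycleLR to expect one step() per batch (num_epochs * steps_per_epoch steps in total), yet main() calls scheduler.step() only once per epoch. As written, the learning rate traverses only a tiny initial fraction of the one-cycle curve before training ends. Note also that range(start_epoch+1, start_epoch + num_epochs) runs num_epochs - 1 iterations, one fewer than the scheduler was configured for. A sketch of the per-batch alternative, assuming train() were extended with an optional scheduler parameter (hypothetical; the current signature does not accept one):

def train(model, device, train_loader, optimizer, criterion, epoch, scheduler=None):
    model.train()
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        if scheduler is not None:
            scheduler.step()  # OneCycleLR expects one step per batch, not per epoch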
tmppl87qjev/_remote_module_non_scriptable.py DELETED
@@ -1,81 +0,0 @@
-from typing import *
-
-import torch
-import torch.distributed.rpc as rpc
-from torch import Tensor
-from torch._jit_internal import Future
-from torch.distributed.rpc import RRef
-from typing import Tuple  # pyre-ignore: unused import
-
-
-module_interface_cls = None
-
-
-def forward_async(self, *args, **kwargs):
-    args = (self.module_rref, self.device, self.is_device_map_set, *args)
-    kwargs = {**kwargs}
-    return rpc.rpc_async(
-        self.module_rref.owner(),
-        _remote_forward,
-        args,
-        kwargs,
-    )
-
-
-def forward(self, *args, **kwargs):
-    args = (self.module_rref, self.device, self.is_device_map_set, *args)
-    kwargs = {**kwargs}
-    ret_fut = rpc.rpc_async(
-        self.module_rref.owner(),
-        _remote_forward,
-        args,
-        kwargs,
-    )
-    return ret_fut.wait()
-
-
-_generated_methods = [
-    forward_async,
-    forward,
-]
-
-
-
-
-def _remote_forward(
-    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
-    module = module_rref.local_value()
-    device = torch.device(device)
-
-    if device.type != "cuda":
-        return module.forward(*args, **kwargs)
-
-    # If the module is on a cuda device,
-    # move any CPU tensor in args or kwargs to the same cuda device.
-    # Since torch script does not support generator expression,
-    # have to use concatenation instead of
-    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
-    args = (*args,)
-    out_args: Tuple[()] = ()
-    for arg in args:
-        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
-        out_args = out_args + arg
-
-    kwargs = {**kwargs}
-    for k, v in kwargs.items():
-        if isinstance(v, Tensor):
-            kwargs[k] = kwargs[k].to(device)
-
-    if is_device_map_set:
-        return module.forward(*out_args, **kwargs)
-
-    # If the device map is empty, then only CPU tensors are allowed to send over wire,
-    # so have to move any GPU tensor to CPU in the output.
-    # Since torch script does not support generator expression,
-    # have to use concatenation instead of
-    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
-    ret: Tuple[()] = ()
-    for i in module.forward(*out_args, **kwargs):
-        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
-        ret = ret + i
-    return ret
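This looks like the scaffolding file that torch.distributed's RemoteModule machinery generates into a temporary directory at import time; it appears to have been committed by accident, so deleting it from version control is the right cleanup.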
train_test.py CHANGED
@@ -31,6 +31,9 @@ def train(model, device, train_loader, optimizer, criterion, epoch, accumulation
 
         pbar.set_description(desc=f'Epoch {epoch} | Loss: {running_loss / (batch_idx + 1):.4f} | Top-1 Acc: {100. * correct1 / total:.2f} | Top-5 Acc: {100. * correct5 / total:.2f}')
 
+        if (batch_idx + 1) % 50 == 0:
+            torch.cuda.empty_cache()
+
     return 100. * correct1 / total, 100. * correct5 / total, running_loss / len(train_loader)
 
 def test(model, device, test_loader, criterion):
@@ -56,13 +59,15 @@ def test(model, device, test_loader, criterion):
             correct5 += predicted.eq(targets.view(-1, 1).expand_as(predicted)).sum().item()
 
             # Collect misclassified samples
+            '''
             for i in range(inputs.size(0)):
                 if targets[i] not in predicted[i, :1]:
                     misclassified_images.append(inputs[i].cpu())
                     misclassified_labels.append(targets[i].cpu())
                     misclassified_preds.append(predicted[i, :1].cpu())
+            '''
 
     test_accuracy1 = 100. * correct1 / total
     test_accuracy5 = 100. * correct5 / total
     print(f'Test Loss: {test_loss/len(test_loader):.4f}, Top-1 Accuracy: {test_accuracy1:.2f}, Top-5 Accuracy: {test_accuracy5:.2f}')
-    return test_accuracy1, test_accuracy5, test_loss / len(test_loader), misclassified_images, misclassified_labels, misclassified_preds
+    return test_accuracy1, test_accuracy5, test_loss / len(test_loader), misclassified_images, misclassified_labels, misclassified_preds
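Two observations on this hunk. First, torch.cuda.empty_cache() forces a device synchronization and makes the allocator re-request memory, so calling it every 50 batches usually slows training without reducing the memory held by live tensors; it mainly helps if allocator fragmentation is the actual problem. Second, the triple-quoted string disables misclassified-sample collection, but test() still returns the (now always empty) lists. If the plotting call in main.py is ever re-enabled, a guard like the following would keep it safe (class_names is a placeholder):

if misclassified_images:  # lists stay empty while collection is disabled
    plot_misclassified_samples(misclassified_images, misclassified_labels,
                               misclassified_preds, classes=class_names)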
utils.py CHANGED
@@ -9,13 +9,15 @@ def save_checkpoint(model, optimizer, epoch, loss, path):
         'optimizer_state_dict': optimizer.state_dict(),
         'loss': loss,
     }, path)
+    print(f"Checkpoint saved at epoch {epoch}")
 
 def load_checkpoint(model, optimizer, path):
-    checkpoint = torch.load(path)
+    checkpoint = torch.load(path, weights_only=True)
     model.load_state_dict(checkpoint['model_state_dict'])
     optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
     epoch = checkpoint['epoch']
     loss = checkpoint['loss']
+    print(f"Checkpoint loaded, resuming from epoch {epoch}")
     return model, optimizer, epoch, loss
 
 def plot_training_curves(epochs, train_acc1, test_acc1, train_acc5, test_acc5, train_losses, test_losses, learning_rates):
@@ -62,4 +64,60 @@ def plot_misclassified_samples(misclassified_images, misclassified_labels, miscl
     plt.imshow(misclassified_grid.permute(1, 2, 0))
     plt.title("Misclassified Samples")
     plt.axis('off')
-    plt.show()
+    plt.show()
+
+def find_lr(model, criterion, optimizer, train_loader, num_epochs=1, start_lr=1e-7, end_lr=10, lr_multiplier=1.1):
+    """
+    Find the optimal learning rate using LR Finder.
+
+    Args:
+    - model: The model to train
+    - criterion: Loss function (e.g., CrossEntropyLoss)
+    - optimizer: Optimizer (e.g., SGD)
+    - train_loader: DataLoader for training data
+    - num_epochs: Number of epochs to run the LR Finder (typically 1-2)
+    - start_lr: Starting learning rate for the experiment
+    - end_lr: Maximum learning rate (used for scaling)
+    - lr_multiplier: Factor by which the learning rate is increased every batch
+
+    Returns:
+    - A plot of loss vs learning rate
+    """
+    lrs = []
+    losses = []
+    avg_loss = 0.0
+    batch_count = 0
+
+    lr = start_lr
+    for epoch in range(num_epochs):
+        model.train()
+        for inputs, labels in train_loader:
+            inputs, labels = inputs.to(device), labels.to(device)
+            optimizer.param_groups[0]['lr'] = lr  # Set the learning rate
+
+            # Forward pass
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = criterion(outputs, labels)
+            loss.backward()
+            optimizer.step()
+
+            avg_loss += loss.item()
+            batch_count += 1
+            lrs.append(lr)
+            losses.append(loss.item())
+
+            # Increase the learning rate for next batch
+            lr *= lr_multiplier
+
+        avg_loss /= batch_count
+        print(f"Epoch [{epoch+1}/{num_epochs}], Avg Loss: {avg_loss:.4f}")
+
+    # Plot the loss vs learning rate
+    plt.plot(lrs, losses)
+    plt.xscale('log')
+    plt.xlabel('Learning Rate')
+    plt.ylabel('Loss')
+    plt.title('Learning Rate Finder')
+    plt.show()
+
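Two caveats on the new find_lr. It references device, which is neither a parameter nor defined in the hunks shown, so it relies on a module-level global that may not exist. And end_lr is accepted but never consulted: the loop only stops when the DataLoader is exhausted, even though at lr_multiplier=1.1 the rate already passes 10 after roughly 190 batches. A compact sketch of both fixes, assuming the function stays in utils.py with matplotlib imported as plt:

import matplotlib.pyplot as plt

def find_lr(model, criterion, optimizer, train_loader, device,
            start_lr=1e-7, end_lr=10, lr_multiplier=1.1):
    """Exponential LR sweep; stops once the LR passes end_lr."""
    lrs, losses = [], []
    lr = start_lr
    model.train()
    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.param_groups[0]['lr'] = lr  # set the LR for this batch
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        optimizer.step()
        lrs.append(lr)
        losses.append(loss.item())
        lr *= lr_multiplier
        if lr > end_lr:
            break  # sweep complete once the target maximum is passed
    plt.plot(lrs, losses)
    plt.xscale('log')
    plt.xlabel('Learning Rate')
    plt.ylabel('Loss')
    plt.title('Learning Rate Finder')
    plt.show()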