st32268
The num_embeddings should be 3 so that all indices can be used to index the weight matrix.
st32269
even my indices is lower than embedding_dim-1 within a batch , i am still getting IndexError: index out of range in self , here is my data and code. In each batch for Age category size is 20 and input embedding size is 70 , dont know why indexing error is throwing data = pd.read_csv('Churn_Modelling.csv') print("Shape:", data.shape) data.head() X_train = data[['Age','Balance']] y_train = pd.DataFrame(data['Exited']) X_train Shape: (10000, 14) Age Balance ---- ------- 0 42 0.00 1 41 83807.86 2 42 159660.80 3 39 0.00 4 43 125510.82 10000 rows × 2 columns y_train Exited ------- 0 1 1 0 2 1 3 0 4 0 10000 rows × 1 columns features = ['Age'] for col in features: X_train.loc[:,col] = X_train.loc[:,col].astype('category') X_train.dtypes Age category Balance float64 dtype: object embedded_cols = {n: len(col.cat.categories) for n,col in X_train[features].items()} embedded_cols {'Age': 70} class ShelterOutcomeDataset(Dataset): def __init__(self, X, Y, embedded_col_names): X = X.copy() self.X1 = X.loc[:,embedded_col_names].copy().values.astype(np.int64) #categorical columns self.X2 = X.drop(columns=embedded_col_names).copy().values.astype(np.float32) #numerical columns self.y = Y.copy().values.astype(np.int64) def __len__(self): return len(self.y) def __getitem__(self, idx): return self.X1[idx], self.X2[idx], self.y[idx] embedding_sizes = [(n_categories, min(50, (n_categories+1)//2)) for _,n_categories in embedded_cols.items()] embedding_sizes [(70, 35)] train_ds = ShelterOutcomeDataset(X_train,y_train , ['Age']) class testNet(nn.Module): def __init__(self, emb_dims, n_cont): super().__init__() self.embeddings = nn.ModuleList([nn.Embedding(categories, size) for categories,size in emb_dims]) no_of_embs = sum(e.embedding_dim for e in self.embeddings) #length of all embeddings combined self.n_emb, self.n_cont = no_of_embs, n_cont self.lin1 = nn.Linear(self.n_emb + self.n_cont,6) self.lin2 = nn.Linear(6, 4) self.lin3 = nn.Linear(4, 2) self.bn1 = nn.BatchNorm1d(self.n_cont) self.bn2 = nn.BatchNorm1d(6) self.bn3 = nn.BatchNorm1d(4) self.emb_drop = nn.Dropout(0.6) self.drops = nn.Dropout(0.3) def forward(self, x_cat, x_cont): x = [e(x_cat[:,i]) for i,e in enumerate(self.embeddings)] x = torch.cat(x, 1) x = self.emb_drop(x) # batch normalization over continous features x2 = self.bn1(x_cont) # concatenate both embedding and continous feature , here 1 means dim # the dimension over which the tensors are concatenated we are concatenating columns x = torch.cat([x, x2], 1) x = F.relu(self.lin1(x)) x = self.drops(x) x = self.bn2(x) x = F.relu(self.lin2(x)) x = self.drops(x) x = self.bn3(x) x = self.lin3(x) return x import torch.nn as nn criterion = nn.CrossEntropyLoss() def train_model(model, optim, train_dl): model.train() total = 0 sum_loss = 0 for cat, cont, y in train_dl: batch = y.shape[0] print(cat.size()) # <--- size of features whihc has to be embeded y = y.to(torch.float32) output = model(cat, cont) _,pred = torch.max(output,1) loss = criterion(output, y.squeeze(1).long()) optim.zero_grad() loss.backward() optim.step() total += batch sum_loss += batch*(loss.item()) return sum_loss/total,pred def train_loop(model, epochs, lr=0.01, wd=0.0): optim = get_optimizer(model, lr = lr, wd = wd) for epoch in range(epochs): loss,pred = train_model(model, optim, train_dl) if (epoch+1) % 5 ==0: print(f'epoch : {epoch+1},training loss : {loss}, output : {output}') batch_size = 20 train_dl = DataLoader(train_ds, batch_size=batch_size,shuffle=True) #valid_dl = DataLoader(valid_ds, batch_size=batch_size,shuffle=True) 
train_dl = DeviceDataLoader(train_dl, device) # valid_dl = DeviceDataLoader(valid_dl, device) # model = ShelterOutcomeModel(embedding_sizes,0) model = testNet(embedding_sizes,1) print(model) from collections import defaultdict opt = torch.optim.Adam(model.parameters(), lr=1e-2) # to_device(model, device) train_loop(model, epochs=100, lr=0.01, wd=0.00001) testNet( (embeddings): ModuleList( (0): Embedding(70, 35) ) (lin1): Linear(in_features=36, out_features=6, bias=True) (lin2): Linear(in_features=6, out_features=4, bias=True) (lin3): Linear(in_features=4, out_features=2, bias=True) (bn1): BatchNorm1d(1, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (bn2): BatchNorm1d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (bn3): BatchNorm1d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (emb_drop): Dropout(p=0.6, inplace=False) (drops): Dropout(p=0.3, inplace=False) ) torch.Size([20, 1]) --------------------------------------------------------------------------- IndexError Traceback (most recent call last) <ipython-input-3281-888e52d4559c> in <module> 74 # to_device(model, device) 75 ---> 76 train_loop(model, epochs=100, lr=0.01, wd=0.00001) <ipython-input-3281-888e52d4559c> in train_loop(model, epochs, lr, wd) 46 optim = get_optimizer(model, lr = lr, wd = wd) 47 for epoch in range(epochs): ---> 48 loss,pred = train_model(model, optim, train_dl) 49 if (epoch+1) % 5 ==0: 50 print(f'epoch : {epoch+1},training loss : {loss}, output : {output}') <ipython-input-3281-888e52d4559c> in train_model(model, optim, train_dl) 15 16 ---> 17 output = model(cat, cont) 18 _,pred = torch.max(output,1) 19 ~/anaconda3/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs) 720 result = self._slow_forward(*input, **kwargs) 721 else: --> 722 result = self.forward(*input, **kwargs) 723 for hook in itertools.chain( 724 _global_forward_hooks.values(), <ipython-input-3280-681fc4d5712d> in forward(self, x_cat, x_cont) 30 31 ---> 32 x = [e(x_cat[:,i]) for i,e in enumerate(self.embeddings)] 33 x = torch.cat(x, 1) 34 <ipython-input-3280-681fc4d5712d> in <listcomp>(.0) 30 31 ---> 32 x = [e(x_cat[:,i]) for i,e in enumerate(self.embeddings)] 33 x = torch.cat(x, 1) 34 ~/anaconda3/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs) 720 result = self._slow_forward(*input, **kwargs) 721 else: --> 722 result = self.forward(*input, **kwargs) 723 for hook in itertools.chain( 724 _global_forward_hooks.values(), ~/anaconda3/lib/python3.8/site-packages/torch/nn/modules/sparse.py in forward(self, input) 122 123 def forward(self, input: Tensor) -> Tensor: --> 124 return F.embedding( 125 input, self.weight, self.padding_idx, self.max_norm, 126 self.norm_type, self.scale_grad_by_freq, self.sparse) ~/anaconda3/lib/python3.8/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse) 1812 # remove once script supports set_grad_enabled 1813 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type) -> 1814 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse) 1815 1816 IndexError: index out of range in self
st32270
An index value of 70 for an embedding layer size of 70 won’t work, since the valid indices would be in the range [0, 69], so you would either need to increase the num_embeddings value or clip the input.
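A minimal reproduction of that constraint (a sketch only, independent of the code in the question):

import torch
import torch.nn as nn

emb = nn.Embedding(num_embeddings=70, embedding_dim=35)

ok = torch.tensor([0, 42, 69])        # valid indices: 0 .. 69
print(emb(ok).shape)                  # torch.Size([3, 35])

bad = torch.tensor([70])
# emb(bad) raises IndexError: index out of range in self
emb_bigger = nn.Embedding(num_embeddings=71, embedding_dim=35)
print(emb_bigger(bad).shape)          # torch.Size([1, 35])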
st32271
@ptrblck even i increase +1 into num_embeddings i am getting same error self.embeddings = nn.ModuleList([nn.Embedding(categories+1, size) for categories,size in emb_dims]) Interesting thing is when i increase it by 23 it does not give error class testNet(nn.Module): def __init__(self, emb_dims, n_cont): super().__init__() for categories,size in emb_dims: print(f'catagrorize is {categories}, size is {size}') self.embeddings = nn.ModuleList([nn.Embedding(categories+23, size) for categories,size in emb_dims]) no_of_embs = sum(e.embedding_dim for e in self.embeddings) #length of all embeddings combined self.n_emb, self.n_cont = no_of_embs, n_cont self.lin1 = nn.Linear(self.n_emb + self.n_cont,6) self.lin2 = nn.Linear(6, 4) self.lin3 = nn.Linear(4, 2) self.bn1 = nn.BatchNorm1d(self.n_cont) self.bn2 = nn.BatchNorm1d(6) self.bn3 = nn.BatchNorm1d(4) self.emb_drop = nn.Dropout(0.6) self.drops = nn.Dropout(0.3) def forward(self, x_cat, x_cont): # take the embedding list and grab an embedding and pass in our single row of data. x = [e(x_cat[:,i]) for i,e in enumerate(self.embeddings)] x = torch.cat(x, 1) x = self.emb_drop(x) x2 = self.bn1(x_cont) x = torch.cat([x, x2], 1) x = F.relu(self.lin1(x)) x = self.drops(x) x = self.bn2(x) x = F.relu(self.lin2(x)) x = self.drops(x) x = self.bn3(x) x = self.lin3(x) return x import torch.nn as nn criterion = nn.CrossEntropyLoss() def train_model(model, optim, train_dl): model.train() total = 0 sum_loss = 0 for cat, cont, y in train_dl: batch = y.shape[0] y = y.to(torch.float32) output = model(cat, cont) _,pred = torch.max(output,1) loss = criterion(output, y.squeeze(1).long()) optim.zero_grad() loss.backward() optim.step() total += batch sum_loss += batch*(loss.item()) return sum_loss/total,pred def train_loop(model, epochs, lr=0.01, wd=0.0): optim = get_optimizer(model, lr = lr, wd = wd) for epoch in range(epochs): loss,pred = train_model(model, optim, train_dl) if (epoch+1) % 5 ==0: print(f'epoch : {epoch+1},training loss : {loss}, output : {output}') batch_size = 100 train_dl = DataLoader(train_ds, batch_size=batch_size,shuffle=True) train_dl = DeviceDataLoader(train_dl, device) print(f'embedding_sizes is {embedding_sizes}') model = testNet(embedding_sizes,1) from collections import defaultdict opt = torch.optim.Adam(model.parameters(), lr=1e-2) train_loop(model, epochs=100, lr=0.01, wd=0.00001) embedding_sizes is [(70, 35)] catagrorize is 70, size is 35 epoch : 5,training loss : 0.4648001512885094, output : 0.6111002564430237 epoch : 10,training loss : 0.4541498306393623, output : 0.6111002564430237 epoch : 15,training loss : 0.45384191155433656, output : 0.6111002564430237 epoch : 20,training loss : 0.45079687386751177, output : 0.6111002564430237 epoch : 25,training loss : 0.4511949673295021, output : 0.6111002564430237 epoch : 30,training loss : 0.45295464009046554, output : 0.6111002564430237 epoch : 35,training loss : 0.45299509912729263, output : 0.6111002564430237 epoch : 40,training loss : 0.45105998665094377, output : 0.6111002564430237 epoch : 45,training loss : 0.4528631994128227, output : 0.6111002564430237 epoch : 50,training loss : 0.4509485891461372, output : 0.6111002564430237 epoch : 55,training loss : 0.4534462735056877, output : 0.6111002564430237 epoch : 60,training loss : 0.4507604452967644, output : 0.6111002564430237 epoch : 65,training loss : 0.4527029529213905, output : 0.6111002564430237 epoch : 70,training loss : 0.4511090362071991, output : 0.6111002564430237 epoch : 75,training loss : 0.4510712164640427, output : 
0.6111002564430237 epoch : 80,training loss : 0.4523083609342575, output : 0.6111002564430237 epoch : 85,training loss : 0.4539755055308342, output : 0.6111002564430237 epoch : 90,training loss : 0.4536020648479462, output : 0.6111002564430237 epoch : 95,training loss : 0.4528249257802963, output : 0.6111002564430237 epoch : 100,training loss : 0.45215955764055255, output : 0.6111002564430237
st32272
TheOraware: "Interesting thing is when i increase it by 23 it does not give error"

This could mean that the max. index is larger than you expect, so you could add assert statements to your code to narrow down the largest values.
st32273
@ptrblck thanks for pointing that out. Could you please show me the statement, as I am very new to PyTorch? How and where do I need to add the assert statements?
st32274
In your training loop you could use something like this:

# works
target = torch.tensor([69, 68])
assert (target<70).all(), "target: {} invalid".format(target)

# fails
target = torch.tensor([69, 70])
assert (target<70).all(), "target: {} invalid".format(target)

which would trigger the errors for target values larger than the used threshold. Once you are running into these errors, check why you expect them to be <70 and why they are apparently not in this range.
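For instance (a sketch only, mirroring the train_model loop posted earlier in this thread, where cat holds the indices that go into nn.Embedding(70, 35)), the check could sit right before the forward pass:

def train_model(model, optim, train_dl):
    model.train()
    for cat, cont, y in train_dl:
        # check the raw indices before they reach the embedding lookup
        assert (cat < 70).all(), "categorical index out of range: {}".format(cat.max())
        output = model(cat, cont)
        loss = criterion(output, y.squeeze(1).long())
        optim.zero_grad()
        loss.backward()
        optim.step()

Placed there, the assert fires on the exact batch that contains an out-of-range value, which narrows the problem down to the data rather than the model.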
st32275
@ptrblck thanks mate, I will work on it and get back to you here if I need more assistance - you are great
st32276
@ptrblck, can you please help me with where to put your suggested code above? Since you mentioned the training loop, I guess it could be like:

def train_loop(model, epochs, lr=0.01, wd=0.0):
    optim = get_optimizer(model, lr = lr, wd = wd)
    for epoch in range(epochs):
        target = torch.tensor([69, 68])                                   <---------------
        assert (target<70).all(), "target: {} invalid".format(target)     <-------------
        loss,pred = train_model(model, optim, train_dl)
        if (epoch+1) % 5 ==0:
            print(f'epoch : {epoch+1},training loss : {loss}, output : {output}')
st32277
@ptrblck I fixed the issue. It was actually due to not label-encoding the Age column: after label encoding, the data is represented by its index for the embedding. Label encoding produces the indices before the input goes into the embedding layer. It means some Age values, which were not label-encoded initially, had values greater than 69 or 70, which was causing this problem. Is my understanding correct? Do we really need to convert columns to label encodings before embedding?
st32278
You don’t need to use a label encoding, but it can be useful if you want to map your targets to [0, nb_classes-1], which is expected by the embedding layer (and usually also loss functions). Alternatively, you could also use the max. value of the “unencoded” target and set it as the num_embeddings in case you are expecting to get the missing values in the future.
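As a rough illustration (a sketch only, not the code from the question; the Age column name is taken from the thread above), a pandas categorical column can be mapped to contiguous indices before building the embedding:

import pandas as pd
import torch
import torch.nn as nn

ages = pd.Series([42, 41, 39, 92], dtype="category")            # raw values, not valid indices
codes = torch.tensor(ages.cat.codes.values, dtype=torch.long)   # 0 .. n_categories-1

emb = nn.Embedding(num_embeddings=len(ages.cat.categories), embedding_dim=4)
out = emb(codes)   # works, since every code is < num_embeddings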
st32279
@ptrblck can you please shed some light on this: https://discuss.pytorch.org/t/embedding-layer/121969
st32280
Hello Everyone, I am training an Autoencoder based on Resnet-Unet Architecture. Here the loss remains constant through out training. I tried varying the learning rate, Used learning rate scheduler, played arround with different optimizers and loss functions(SSE, BCE etc). Used normalized and unnormalized data .I followed the suggestions provided by in the pytorch forum. But was unable to fix the problem. It would be great if someone can point out where i am going wrong. Thank you Code batch_size=3 def convrelu(in_channels, out_channels, kernel, padding): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel, padding=padding), nn.ReLU(inplace=True), ) class ResNetUNet(nn.Module): def __init__(self, n_class): super().__init__() self.base_model = models.resnet18(pretrained=True) self.base_layers = list(self.base_model.children()) self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2) self.layer0_1x1 = convrelu(64, 64, 1, 0) self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4) self.layer1_1x1 = convrelu(64, 64, 1, 0) self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8) self.layer2_1x1 = convrelu(128, 128, 1, 0) self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16) self.layer3_1x1 = convrelu(256, 256, 1, 0) self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32) self.layer4_1x1 = convrelu(512, 512, 1, 0) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.conv_up3 = convrelu(256 + 512, 512, 3, 1) self.conv_up2 = convrelu(128 + 512, 256, 3, 1) self.conv_up1 = convrelu(64 + 256, 256, 3, 1) self.conv_up0 = convrelu(64 + 256, 128, 3, 1) self.conv_original_size0 = convrelu(3, 64, 3, 1) self.conv_original_size1 = convrelu(64, 64, 3, 1) self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1) self.conv_last = nn.Conv2d(64, n_class, 1) def forward(self, input): x_original = self.conv_original_size0(input) x_original = self.conv_original_size1(x_original) layer0 = self.layer0(input) #save_image(layer0,"input_layer0.png") layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) # layer4 = self.layer4_1x1(layer4) x = self.upsample(layer4) print(x.shape) save_image(x[1,1,:,:],"upsample_layer.png") save_image(layer3[1,0,:,:],"layer_3.png") save_image(x[1,0,:,:],"layer_3_x.png") layer3 = self.layer3_1x1(layer3) x = torch.cat([x, layer3], dim=1) x = self.conv_up3(x) x = self.upsample(x) save_image(layer2[1,0,:,:],"layer_2.png") save_image(x[1,0,:,:],"layer_2_x.png") layer2 = self.layer2_1x1(layer2) x = torch.cat([x, layer2], dim=1) x = self.conv_up2(x) x = self.upsample(x) save_image(layer1[1,0,:,:],"layer_1.png") save_image(x[1,0,:,:],"layer_1_x.png") layer1 = self.layer1_1x1(layer1) x = torch.cat([x, layer1], dim=1) x = self.conv_up1(x) x = self.upsample(x) save_image(layer0[1,0,:,:],"layer_0.png") save_image(x[1,0,:,:],"layer_0_x.png") layer0 = self.layer0_1x1(layer0) x = torch.cat([x, layer0], dim=1) x = self.conv_up0(x) x = self.upsample(x) x = torch.cat([x, x_original], dim=1) x = self.conv_original_size2(x) out = self.conv_last(x) save_image(out[1,0,:,:],"Last_layer_ch1.png") save_image(out[1,1,:,:],"Last_layer_ch2.png") save_image(out[1,2,:,:],"Last_layer_ch3.png") return out #********************************************************************************************************************************************************** device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = 
ResNetUNet(n_class=3) summary(model, input_size=(3, 224, 224)) model = model.to(device) transform = transforms.Compose([ transforms.Resize((224,224), interpolation=2),transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) trainset=torchvision.datasets.ImageFolder("../data/train", transform=transform, target_transform=None) trainloader = torch.utils.data.DataLoader(trainset, shuffle = True , batch_size = batch_size , num_workers = 2, drop_last=True) testset=torchvision.datasets.ImageFolder("../data/test", transform=transform, target_transform=None) testloader = torch.utils.data.DataLoader(testset, shuffle = True , batch_size = batch_size , num_workers = 2, drop_last=True) #autoencoder_criterion = nn.MSELoss() optimizer_ft = optim.Adam(model.parameters(), lr = 1e-2) #optimizer_ft = optim.SGD(model.parameters(), lr=0.01, momentum=0.9) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1) #********************************************************************************************************************************************************** def calc_loss(pred, target): autoencoder_criterion = nn.MSELoss() loss = autoencoder_criterion(pred, target) #m = nn.Sigmoid() #autoencoder_criterion = nn.BCELoss() #loss = autoencoder_criterion(m(pred), target) return loss def train_model(model, optimizer, scheduler, num_epochs=25): #model.train() # Set model to training mode for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) since = time.time() #model.train() # Set model to training mode run_loss = 0 for data in trainloader: optimizer.zero_grad() inputs,_ = data inputs = inputs.to(device) outputs = model(inputs) exit(0) loss = calc_loss(outputs, inputs) loss.backward(retain_graph=True) optimizer.step() #exit(0) time_elapsed = time.time() - since #print("loss=",loss.item()) run_loss += loss.item()*inputs.size(0) run_loss = run_loss / len(trainset) print("scheduler=",scheduler.get_last_lr()) scheduler.step() print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, run_loss)) print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) for idx in np.arange(3): save_image(outputs[idx],"Train_pred_output_image%d.png"%idx) save_image(inputs[idx],"Train_pred_input_image%d.png"%idx) #********************************************************************************************************************************************************** device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) num_class = 3 model = ResNetUNet(num_class).to(device) train_model(model, optimizer_ft, exp_lr_scheduler, num_epochs=50) #********************************************************************************************************************************************************** model.eval() # Set model to the evaluation mode inputs_test, _ = next(iter(testloader)) inputs_test = inputs_test.to(device) pred = model(inputs_test) #pred = F.sigmoid(pred) for idx in np.arange(3): save_image(inputs_test[idx],"result_input_image%d.png"%idx) save_image(pred[idx],"result_output_image%d.png"%idx)
st32281
Could you check if the model parameters are getting valid gradients by printing their .grad attributes after the first backward call? Also, why do you need to use retain_graph=True here?

loss.backward(retain_graph=True)

As general guidance, I would try to overfit a small data sample first (e.g. just 10 samples) and make sure the model is able to overfit it.
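One way to do that check (a minimal sketch, assuming the model and loss defined above) is to loop over named_parameters() right after the first backward call:

loss.backward()
for name, param in model.named_parameters():
    if param.grad is None:
        print(f"{name}: no gradient")
    else:
        print(f"{name}: grad norm {param.grad.norm().item():.3e}")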
st32282
@ptrblck As per your suggestion, i checked the gradients of the parameters after the loss.backward() step. I get the gradients in the range of 10^-4 to 10^-7. You can find the gradients at the end of this post. For overfitting the model, I trained the model with exactly 10 images and i dont get the perfect reconstructed output. I have attached the inputs and the corresponding outputs after overfitting(training the model with 10 images). The inputs are normalized and are shown in the 1st and 3rd row. The corresponding outputs are shown in the 2nd and the 4th row. ******************************************************************************** Example of gradients obtained************************************************* Grad_before= None Grad_before_shape= torch.Size([64, 192, 3, 3]) Grad_after= tensor([[[[ 1.5444e-05, 1.6913e-05, 1.5995e-05], [ 1.6880e-05, 1.8491e-05, 1.7459e-05], [ 1.8331e-05, 1.9917e-05, 1.8404e-05]], [[ 1.9927e-05, 1.7666e-05, 1.5221e-05], [ 1.9537e-05, 1.6954e-05, 1.4170e-05], [ 1.9170e-05, 1.6359e-05, 1.3342e-05]], [[ 2.6886e-05, 2.8304e-05, 3.0287e-05], [ 2.7075e-05, 2.8953e-05, 3.1588e-05], [ 2.6412e-05, 2.8855e-05, 3.2180e-05]], ..., [[ 3.6494e-05, 4.1480e-05, 5.3497e-05], [ 3.5902e-05, 3.6543e-05, 4.6513e-05], [ 4.2026e-05, 3.5562e-05, 4.6378e-05]], [[ 2.1511e-05, 5.2198e-05, 8.9409e-05], [ 2.1499e-05, 5.0138e-05, 8.8784e-05], [ 2.2240e-05, 4.8883e-05, 8.4382e-05]], [[ 8.5568e-07, 3.9607e-08, 9.4165e-07], [ 1.3130e-08, -2.6260e-07, 1.0790e-07], [ 6.6315e-07, -4.9992e-08, 1.2894e-07]]], [[[-2.7049e-05, -2.1350e-05, -1.4579e-05], [-2.6697e-05, -2.0302e-05, -1.4146e-05], [-2.4956e-05, -1.8778e-05, -1.3871e-05]], [[-2.1540e-05, -1.5810e-05, -1.6859e-05], [-1.8347e-05, -1.3086e-05, -1.5404e-05], [-1.6027e-05, -1.1518e-05, -1.5272e-05]], [[-4.0438e-05, -4.8218e-05, -5.6844e-05], [-4.0368e-05, -4.9173e-05, -5.8418e-05], [-3.7268e-05, -4.7455e-05, -5.8082e-05]], ..., [[-1.3145e-05, -8.9089e-05, -1.2546e-04], [-2.2529e-05, -8.7684e-05, -1.1259e-04], [-4.3510e-05, -1.1628e-04, -1.4264e-04]], [[-8.6003e-05, -1.5247e-04, -1.5601e-04], [-8.3001e-05, -1.3158e-04, -1.4844e-04], [-9.7446e-05, -1.2937e-04, -1.4405e-04]], [[ 2.2745e-06, 8.6252e-06, 1.6749e-06], [ 1.0760e-06, 9.5257e-06, 4.2126e-06], [-5.9326e-06, 5.3898e-06, -7.5285e-06]]], [[[ 5.7302e-05, 3.6736e-05, -1.1809e-05], [ 5.0031e-05, 2.4906e-05, -2.9053e-05], [ 5.9241e-05, 2.9926e-05, -2.5830e-05]], [[-4.2512e-04, -5.1677e-04, -4.7505e-04], [-4.5070e-04, -5.4368e-04, -4.9834e-04], [-4.5768e-04, -5.5035e-04, -4.9912e-04]], [[-4.5637e-04, -4.6583e-04, -3.4601e-04], [-4.6171e-04, -4.5984e-04, -3.3626e-04], [-4.8818e-04, -4.7194e-04, -3.3743e-04]], ..., [[-6.0629e-03, -5.9946e-03, -5.1200e-03], [-6.2693e-03, -6.2557e-03, -5.3146e-03], [-6.2365e-03, -6.2213e-03, -5.2145e-03]], [[-7.2563e-04, -2.4535e-04, 2.9517e-04], [-8.3638e-04, -3.7627e-04, 1.9238e-04], [-6.2643e-04, -2.4000e-04, 2.5850e-04]], [[-2.7501e-03, -2.7126e-03, -2.3582e-03], [-2.8471e-03, -2.8696e-03, -2.5841e-03], [-2.6671e-03, -2.7258e-03, -2.4540e-03]]], ..., [[[ 5.3254e-05, 6.5035e-05, 1.0622e-04], [ 4.4166e-05, 5.8326e-05, 1.0131e-04], [ 3.2802e-05, 4.8616e-05, 9.2495e-05]], [[ 3.4394e-03, 3.5002e-03, 3.4879e-03], [ 3.4689e-03, 3.5324e-03, 3.5205e-03], [ 3.4897e-03, 3.5536e-03, 3.5402e-03]], [[ 3.6269e-03, 3.5958e-03, 3.5091e-03], [ 3.6331e-03, 3.5956e-03, 3.5011e-03], [ 3.6592e-03, 3.6140e-03, 3.5089e-03]], ..., [[ 6.5372e-02, 6.5775e-02, 6.5144e-02], [ 6.5743e-02, 6.6213e-02, 6.5610e-02], [ 6.5736e-02, 6.6206e-02, 6.5699e-02]], [[ 6.1241e-03, 5.5962e-03, 
5.1829e-03], [ 6.1115e-03, 5.5773e-03, 5.1191e-03], [ 6.0164e-03, 5.5238e-03, 5.0697e-03]], [[ 2.9045e-02, 2.9361e-02, 2.9242e-02], [ 2.9073e-02, 2.9443e-02, 2.9367e-02], [ 2.8875e-02, 2.9244e-02, 2.9191e-02]]], [[[-2.7105e-05, -3.4002e-05, -5.2283e-05], [-2.6984e-05, -3.5369e-05, -5.5386e-05], [-2.6098e-05, -3.6343e-05, -5.6643e-05]], [[-1.6817e-04, -1.7624e-04, -1.5094e-04], [-1.7449e-04, -1.8054e-04, -1.5067e-04], [-1.7860e-04, -1.8413e-04, -1.5044e-04]], [[-2.2523e-04, -2.2414e-04, -1.7151e-04], [-2.2442e-04, -2.2134e-04, -1.6786e-04], [-2.2316e-04, -2.1915e-04, -1.6463e-04]], ..., [[-1.4295e-03, -1.6756e-03, -1.4744e-03], [-1.4044e-03, -1.6606e-03, -1.5211e-03], [-1.3774e-03, -1.5886e-03, -1.4934e-03]], [[-2.7282e-04, -8.7181e-05, -8.0031e-06], [-2.9121e-04, -1.0438e-04, -2.2774e-05], [-2.7456e-04, -1.0704e-04, -2.9238e-05]], [[-6.0413e-04, -5.5349e-04, -3.9992e-04], [-5.8806e-04, -5.5365e-04, -4.3254e-04], [-5.6101e-04, -5.3138e-04, -4.4425e-04]]], [[[-1.1781e-04, -1.2889e-04, -1.5897e-04], [-1.1206e-04, -1.2501e-04, -1.5591e-04], [-1.0279e-04, -1.1642e-04, -1.4722e-04]], [[-4.2027e-03, -4.2763e-03, -4.2969e-03], [-4.2305e-03, -4.3049e-03, -4.3248e-03], [-4.2183e-03, -4.2923e-03, -4.3104e-03]], [[-4.4182e-03, -4.4096e-03, -4.3487e-03], [-4.4423e-03, -4.4278e-03, -4.3601e-03], [-4.4392e-03, -4.4195e-03, -4.3446e-03]], ..., [[-7.9604e-02, -8.0565e-02, -8.0215e-02], [-7.9806e-02, -8.0850e-02, -8.0523e-02], [-7.9351e-02, -8.0334e-02, -8.0084e-02]], [[-7.3640e-03, -6.9757e-03, -6.6576e-03], [-7.3242e-03, -6.9142e-03, -6.5543e-03], [-7.2372e-03, -6.8390e-03, -6.4863e-03]], [[-3.5256e-02, -3.5790e-02, -3.5839e-02], [-3.5032e-02, -3.5616e-02, -3.5710e-02], [-3.4774e-02, -3.5336e-02, -3.5424e-02]]]], device='cuda:0')
st32283
As your model cannot overfit this small data set, you should try to debug it and play around with hyperparameters etc. to make sure it’s able to do so. E.g. you could remove some layers and work with a very simple model first (even a single conv layer could be a valid experiment).
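A bare-bones version of that experiment could look like this (a sketch only, with a made-up single-conv model and random data standing in for the 10 images):

import torch
import torch.nn as nn

model = nn.Conv2d(3, 3, kernel_size=3, padding=1)     # trivially small "autoencoder"
data = torch.randn(10, 3, 64, 64)                     # 10 fixed samples
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()

for step in range(500):
    optimizer.zero_grad()
    loss = criterion(model(data), data)
    loss.backward()
    optimizer.step()
print(loss.item())   # should approach 0 if the setup is able to overfit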
st32284
I know this question is not directly related, But in my pytorch model I need to create this json file first. import glob import json import os LABEL = { 'neu': '01', #: 'neutral', 'fru': '02', #: 'calm', 'hap': '03', #: 'happy', 'sad': '04', #: 'sad', 'ang': '05', #: 'angry', 'fea': '06', #: 'fearful', 'exc': '07', #: 'disgust', 'sur': '08', #: 'surprised' 'xxx': '09', #: 'other' } PATH_TXT = glob.glob("E:/dataset/data/IEMOCAP_full_release/*/dialog/EmoEvaluation/S*.txt") PATH_WAV = glob.glob("E:/dataset/data/IEMOCAP_full_release/*/sentences/wav/*/S*.wav") PAIR = {} def getPair(): for path in PATH_TXT: with open(path, 'r') as f: fr = f.read().split("\t") for i in range(len(fr)): if (fr[i] in LABEL): PAIR[fr[i - 1]] = fr[i] def rename(): for i in PATH_WAV: for j in PAIR: if (os.path.basename(i)[:-4] == j): k = j.split('_') if (len(k) == 3): name = os.path.dirname(i) + '/' + k[0] + '-' + k[1] + '-' + LABEL[PAIR[j]] + '-01-' + k[2] + '.wav' os.rename(src=i, dst=name) print(name) ''' Ses01F_impro01_F000.wav k[0]:Ses01F k[1]:impro01 k[2]:F000 Ses01F-impro01-XX-01-F000.wav ''' elif (len(k) == 4): name = os.path.dirname(i) + '/' + k[0] + '-' + k[1] + '-' + LABEL[PAIR[j]] + '-01-' + k[2] + '_' + \ k[3] + '.wav' os.rename(src=i, dst=name) print(name) ''' Ses03M_script03_2_F032.wav k[0]:Ses03M k[1]:script03 k[2]:2 k[3]:F032 Ses03M-script03-XX-01-2_F032.wav ''' if __name__ == '__main__': pairPath = "E:/Python_On_All_Dataset/IEMO/IEMOCAP" # if (os.path.exists(pairPath)): # with open(pairPath, 'r') as f: # PAIR = json.load(f) # else: getPair() with open('my.json', 'w') as f: json.dump(obj=PAIR, fp=f) rename() when I run this ''main" function its throwing an error: saying permission error Now I am unable to run and regenerate the error because the second fun rename() is changing the filenames in my original dataset. So plz guide how to change this code at line os.rename(src=i, dst=name) so that the renamed files are stored in some other location, then probably I can save this dict name PAIR in json format. Regards
st32285
I'm not sure if I understand the issue correctly, but I guess you could change the dst location to store the files in another location. In any case, since this is unrelated to PyTorch, I would recommend posting it on StackOverflow or any other general Python discussion board to get a faster and better answer.
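For example (a sketch only; the destination folder and the renamed filename are made up, and shutil.copy2 is used instead of os.rename so the originals stay untouched):

import os
import shutil

dst_dir = "E:/renamed_wavs"          # hypothetical output folder
os.makedirs(dst_dir, exist_ok=True)

src = "E:/dataset/data/IEMOCAP_full_release/Session1/sentences/wav/Ses01F_impro01/Ses01F_impro01_F000.wav"
new_name = "Ses01F-impro01-02-01-F000.wav"   # whatever rename() computes (made up here)
shutil.copy2(src, os.path.join(dst_dir, new_name))   # dst must include the filename

Note that the destination passed to the copy (or rename) call has to be the full target file path, not just the folder.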
st32286
In that case could you explain the problem a bit more? It seems you’ve executed the script, some files were moved, but the script failed with a permission error (did you solve the permission issue?). Afterwards you wanted to continue the script, but are now running into issues since a subset was already moved?
st32287
Sir, I am trying your previous suggestion, just changing dst; let's see if it works for me. I will let you know, sir. Regards
st32288
@ptrblck Hi sir, I have changed dst to the required different folder, 'E:\\Python_On_All_Dataset\\IEMO\\long_codes\\multiscale_att_impro_only\\head_fusion-master\\IEMOCAP'. But it is throwing the error:

[WinError 183] Cannot create a file when that file already exists: 'E:/dataset/data/IEMOCAP_full_release\\Session1\\sentences\\wav\\Ses01F_impro01\\Ses01F_impro01_F000.wav' -> 'E:\\Python_On_All_Dataset\\IEMO\\long_codes\\multiscale_att_impro_only\\head_fusion-master\\IEMOCAP'

Sir, let me explain it again: I want to change the names of the data files, which are stored in the original directory of my dataset, and the renamed files need to be stored at another location so that my original data files are not disturbed. But why is it saying these files already exist there? The destination folder IEMOCAP is empty. Regards
st32289
Let's try to use torch.profiler for ResNet18:

import torch
import torch.profiler
import torchvision

rn18 = torchvision.models.resnet18().to("cuda:0")
rn18_params = {"x": torch.randn(16, 3, 224, 224).to("cuda:0")}

def run_model_profiler(callback, dict_params, wait=1, warmup=2, active=5, row_limit=15):
    with torch.profiler.profile(
        schedule=torch.profiler.schedule(wait=wait, warmup=warmup, active=active),
        on_trace_ready=None,
        profile_memory=False,
        record_shapes=False,
        with_stack=False,
        with_flops=True,
    ) as prof:
        for _ in range(wait + warmup + active):
            _ = callback(**dict_params)
            prof.step()
    print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=row_limit))

run_model_profiler(rn18.forward, rn18_params)

[profiler output table: screenshot in the original post]

Why doesn't the sum of all values in Self CUDA % equal 100? It's much more than 100. And how can I remove ProfilerStep* from the result? I don't want the result to contain it and affect the percentage stats.
st32290
I am having trouble understanding exactly what this line in the docs means:

grad_outputs (sequence of Tensor) – The "vector" in the Jacobian-vector product. Usually gradients w.r.t. each output. None values can be specified for scalar Tensors or ones that don't require grad. If a None value would be acceptable for all grad_tensors, then this argument is optional. Default: None.

I have seen another thread which partially explains it (None is equivalent to passing in torch.ones(...) of the proper size), but I still don't really understand what it is for or what it should be used for. Any input? Thanks
st32291
Solved by albanD in post #4.
st32292
Hi,

"None is equivalent to passing in torch.ones(...) of the proper size"

This is only true for an output with a single element! Otherwise, you can see these outputs as providing dL/dout (where L is your loss) so that autograd can compute dL/dw (where w are the parameters for which you want the gradients) as dL/dw = dL/dout * dout/dw. Another way to see this, as mentioned in the docs, is that autograd only computes a vector-matrix product between a vector v and the Jacobian of the function. grad_outputs allows you to specify this vector v.
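A tiny example of that vector-Jacobian view (a sketch only, with made-up tensors):

import torch

x = torch.randn(3, requires_grad=True)
y = x ** 2                          # non-scalar output, so grad_outputs is required
v = torch.tensor([1.0, 0.5, 2.0])

# computes v^T J; here J is diagonal with entries 2*x, so the result is v * 2 * x
g, = torch.autograd.grad(y, x, grad_outputs=v)
print(g)                            # equals 2 * x * v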
st32293
Thanks for your answer, so the vector passed in will not be mutated, but it will have an effect on the final gradients that come out of the grad function? Is there a simple use case to illustrate why someone would need this?
st32294
In most cases you can do without it, but, for example, you can replace:

loss = l1 + 2 * l2
autograd.grad(loss, inp)

by

autograd.grad((l1, l2), inp, grad_outputs=(torch.ones_like(l1), 2 * torch.ones_like(l2)))

which is going to be slightly faster. Also, some algorithms require you to compute x * J for some x. You can avoid having to compute the full Jacobian J by simply providing x as a grad_output.
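A quick check of that equivalence (a sketch with arbitrary scalar losses l1 and l2):

import torch
from torch import autograd

inp = torch.randn(4, requires_grad=True)
l1, l2 = inp.sum(), (inp ** 2).sum()

g_a, = autograd.grad(l1 + 2 * l2, inp, retain_graph=True)
g_b, = autograd.grad((l1, l2), inp,
                     grad_outputs=(torch.ones_like(l1), 2 * torch.ones_like(l2)))
print(torch.allclose(g_a, g_b))   # True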
st32295
albanD: "This is only true for an output with a single element!"

Thanks for the help. Just one more thing: it seems that, in the code you posted, passing in torch.ones(...) will not have a material effect on the final outcome, right? That seems to conflict with the comment about a single element, but I am not sure.
st32296
I assume above that l1 and l2 are scalar values! Sorry, I just use ones_like() to get a Tensor with a 1 on the right device and with the right dtype.
st32297
This example was really useful for me to understand the grad_outputs argument. I think it could be added to the autograd documentation to help more people like me.
st32298
Thanks for that answer. I would add that torch.ones could be seen as the derivative of the identity map; in this way the backward differentiation can be initialized. It acts as a seed in some sense!
st32299
So I'm trying to implement a layer that performs weight sharing on a per-neuron basis. I'll give a basic example: we have 2 scalar parameters, a and b, and we have 3 dimensional inputs and outputs, and we want the weight matrix to be given by:

a b b
b a b
b b a

One way I came up with is this:

class WeightSharing(nn.Module):
    def __init__(self):
        super().__init__()
        shared_weights = torch.nn.Parameter(torch.randn(2))
        self.register_parameter("shared_weights", shared_weights)
        self.index_map = torch.LongTensor([0,0,1, 0,1,0, 1,0,0])

    def forward(self, x):
        W = self.shared_weights[self.index_map].view(3, 3)
        return torch.matmul(x, W)

The problem is that indexing the shared weights on each forward pass seems to be detrimental to performance. Can anyone recommend a better approach to this? Is there a layer that can perform a view based on a fixed index mapping?
st32300
Hello, I’m trying to create a custom Bert model by adding a bilstm layer on top of a pretrained Bert model. I have the below code snippet that is causing problems : class BertClassifier(nn.Module): """Bert Model for Classification Tasks.""" def __init__(self, freeze_bert=False): super(BertClassifier, self).__init__() # Specify hidden size of BERT, hidden size of our classifier, and number of labels D_in, H, D_out = 768, 50, 2 # Instantiate BERT model self.bert = BertModel.from_pretrained('bert-base-multilingual-uncased') self.lstm = nn.LSTM(D_in, H, batch_first=True, bidirectional=True) self.linear = nn.Linear(H*2 , D_out) # Freeze the BERT model if freeze_bert: for param in self.bert.parameters(): param.requires_grad = False def forward(self, input_ids, attention_mask): # Feed input to BERT outputs = self.bert(input_ids=input_ids,attention_mask=attention_mask) sequence_output = outputs[0] print("sequence_output size", sequence_output.size()) sequence_output, _ = self.lstm(sequence_output) print("lstm size", sequence_output.size()) linear_output = self.linear(sequence_output) print("linear_output size", linear_output.size()) return linear_output def train(model, train_dataloader, val_dataloader=None, epochs=4, evaluation=False): """Train the BertClassifier model.""" # Start training loop print("Start training...\n") for epoch_i in range(epochs): # Print the header of the result table print(f"{'Epoch':^7} | {'Batch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Elapsed':^9}") print("-" * 70) # Measure the elapsed time of each epoch t0_epoch, t0_batch = time.time(), time.time() # Reset tracking variables at the beginning of each epoch total_loss, batch_loss, batch_counts = 0, 0, 0 # Put the model into the training mode model.train() # For each batch of training data... for step, batch in enumerate(train_dataloader): batch_counts += 1 # Load batch to GPU b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch) # Zero out any previously calculated gradients model.zero_grad() # Perform a forward pass. This will return logits. logits = model(b_input_ids, b_attn_mask) # Compute loss and accumulate the loss values loss = loss_fn(logits, b_labels) batch_loss += loss.item() total_loss += loss.item() # Perform a backward pass to calculate gradients loss.backward() # Clip the norm of the gradients to 1.0 to prevent "exploding gradients" torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # Update parameters and the learning rate optimizer.step() scheduler.step() # Print the loss values and time elapsed for every 20 batches if (step % 20 == 0 and step != 0) or (step == len(train_dataloader) - 1): # Calculate time elapsed for 20 batches time_elapsed = time.time() - t0_batch # Print training results print( f"{epoch_i + 1:^7} | {step:^7} | {batch_loss / batch_counts:^12.6f} | {'-':^10} | {'-':^9} | {time_elapsed:^9.2f}") # Reset batch tracking variables batch_loss, batch_counts = 0, 0 t0_batch = time.time() # Calculate the average loss over the entire training data avg_train_loss = total_loss / len(train_dataloader) print("-" * 70) #Evaluation if evaluation == True: # After the completion of each training epoch, measure the model's performance # on our validation set. 
val_loss, val_accuracy = evaluate(model, val_dataloader) # Print performance over the entire training data time_elapsed = time.time() - t0_epoch print( f"{epoch_i + 1:^7} | {'-':^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {time_elapsed:^9.2f}") print("-" * 70) print("\n") print("Training complete!") So when i run the code, it returns the error Expected target size (32, 2), got torch.Size([32]) on line loss = loss_fn(logits, b_labels) ( in the training function). These are the sizes : sequence_output size torch.Size([32, 64, 768]) lstm size torch.Size([32, 64, 100]) linear_output size torch.Size([32, 64, 2]) I tried reshaping the linear output with linear_output = linear_output.view(batch_size, 2) but after that it threw the error shape '[32, 2]' is invalid for input of size 4096 on line linear_output = linear_output.view(batch_size, 2) Any help or advice will be much appeciated.
st32301
The nn.LSTM layer's sequence_output will have the shape [batch_size, seq_len, nb_features] (in the batch_first=True setup) as described in the docs. The following linear layer will thus be applied to all time steps in the seq_len dimension and will return [batch_size=32, seq_len=64, nb_features=D_out=2]. nn.CrossEntropyLoss will understand this shape as [batch_size, nb_classes, additional_dim] and expects the target to have the shape [batch_size, additional_dim]. I assume you would like to use the last time step only, so you could index the lstm output via sequence_output[:, -1] or reduce this tensor in any other way.
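Sketched out with the shapes printed in the question (random tensors stand in for the real activations):

import torch
import torch.nn as nn

lstm_out = torch.randn(32, 64, 100)       # [batch_size, seq_len, 2*hidden]
linear = nn.Linear(100, 2)

last_step = lstm_out[:, -1]               # [32, 100], only the final time step
logits = linear(last_step)                # [32, 2]

targets = torch.randint(0, 2, (32,))      # class indices
loss = nn.CrossEntropyLoss()(logits, targets)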
st32302
Thank you so much @ptrblck! I modified linear_output to linear_output = self.linear(sequence_output[:, -1]) and the training started; hopefully I won't encounter other issues during training. I do have 2 more questions, if you could also help with those. First, as I am new to PyTorch, BERT and LSTMs: is the forward function correct for binary classification? The training works, but I'm wondering if it is the correct way to do it. The second one is regarding CUDA. At the moment the training runs on the CPU, but it takes hours to train. I have the latest CUDA version, and the problem is that it says it ran out of memory right before training. I already tried to lower the batch size as much as I could (to 16, 4, 1), but this didn't resolve the problem. I don't know if it's relevant, but when I run the code on the GPU I checked the GPU usage in the Task Manager: from 0% it goes to 5%, 9%, and when it throws the error it jumps to 100% and the running code stops, throwing the error right before training. Any idea what else I can do to resolve this problem? Thank you!
st32303
I’m unsure what this indexing sequence_output = outputs[0] is grabbing, but besides that the code looks alright (at least I don’t see any obvious issues). Since you are using two output units in the last linear layer, I assume you are using nn.CrossEntropyLoss for the 2-class multi-class classification? While this would work, you could also return a single output and use nn.BCEWithLogitsLoss instead, so you might want to experiment with it. If you are running out of memory on the GPU you would indeed need to lower the memory usage by e.g. lowering the batch size. If it’s already a single sample, you could use e.g. torch.utils.checkpoint to trade compute for memory.
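The single-output variant could look roughly like this (a sketch only, reusing the batch size from this thread):

import torch
import torch.nn as nn

logits = torch.randn(32, 1)                 # one output unit per sample
targets = torch.randint(0, 2, (32, 1)).float()

criterion = nn.BCEWithLogitsLoss()          # expects raw logits, applies sigmoid internally
loss = criterion(logits, targets)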
st32304
sequence_output = outputs[0] was initially:

# Extract the last hidden state of the token [CLS] for the classification task
sequence_output = outputs[0][:, 0, :]

but that turned the tensor into a 2-dimensional one and I needed a 3-dimensional tensor for the LSTM. I tried to use that and reshape the tensor so it would work with the LSTM, but that brought a lot of issues afterwards. Yes, I am using nn.CrossEntropyLoss. When using torch.utils.checkpoint, do I have to make modifications just in the forward function or in the entire code as well? Do you know any tutorial on using this?
st32305
I was using this notebook in the past as a guide to using checkpointing. It might be a bit outdated, but it could still be useful.
st32306
Is there some built-in method for calculating prediction accuracy over sequences of softmax/logsoftmax vectors? So i have a batch of sequences, the inputs padded by some padding index, the targets of identical lengths where the targets are class indices and the prediction of the network is in the form of softmax or logsoftmax scores. I would like to calculate the accuracy of the predictions on the current batch as efficiently as possible. This means, all the elements in each of the sequences where the input is a padding index need to get ignored and for the rest, the prediction index needs to get calculated as the index of the element in the output vector with the highest prob or log/prob and then all the correctly predicted indices need to get counted etc. The loss functions like LogSoftmax automatically take care of ignoring padding indices. Is there an equivalent method for calculating accuracy? And also, what if the sequences are packed, is there are method for that situation?
st32307
Hi everyone, I know this topic was previously discussed; however, the proposed solutions didn't work for me. I am trying to perform classification of precomputed features into 7 categories using logistic regression. I got the following error when training the classifier:

ValueError: Expected target size (32, 7), got torch.Size([32])

My target shape was ([768, 1]) and squeezing it didn't solve the problem. The input shape is torch.Size([768, 1, 221]). By squeezing it, I got this error:

RuntimeError: Expected object of scalar type Long but got scalar type Int for argument #2 'target'

To train the logistic regression model, I used this piece of code, which works steadily with another dataset:

#define classifier
num_input = trainingData.shape[-1]
num_classes = trainingLabels.cpu().unique().numel()
model = Sequential(Linear(num_input, num_classes), LogSoftmax(dim=1))
optimizer = Adam(model.parameters())
criterion = NLLLoss()
batch_size = 32
num_epochs = 50
#learning rate
lr = 1e-4
nsamples = trainingData.shape[0]
nbatches = nsamples // batch_size

for e in range(num_epochs):
    perm = torch.randperm(nsamples)
    for i in range(nbatches):
        idx = perm[i * batch_size : (i+1) * batch_size]
        model.zero_grad()
        resp = model.forward(trainingData[idx])
        trainingLabels = trainingLabels.squeeze()
        loss = criterion(resp, trainingLabels[idx])
        loss.backward()
        optimizer.step()
    resp = model.forward(trainingData)
    avg_loss = criterion(resp, trainingLabels)

Obviously, my problem is in the data shape, but I cannot fix it, maybe because I am new to PyTorch. Any help will be appreciated.
st32308
Hi Salma, Based on the error message, it seems you have an additional dimension in your model output. If you just would like to classify the data in 7 classes (not-pixel wise classification etc.), your output should have the shape [batch_size, nb_classes], while your target should be a torch.LongTensor containing the class indices in the shape [batch_size]. Try to squeeze the model output and convert your target as: target = target.long() If that doesn’t help, could you post the input, output and target shapes, so that we could debug this issue further? Also a small issue in your code: You should call the model directly (output = model(data)), since this will make sure to register all hooks etc. Currently you are using model.forward(data) which might yield some issues in the future. While your code should work fine using your manual batching approach, you could use a Dataset and DataLoader instead, which will make shuffling, preprocessing the data etc. a bit easier.
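As a rough sketch of the expected shapes and the DataLoader suggestion (the tensor sizes are taken from this thread, everything else is made up):

import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader

features = torch.randn(768, 1, 221).squeeze(1)              # -> [768, 221]
labels = torch.randint(0, 7, (768, 1)).squeeze(1).long()    # -> [768], class indices

loader = DataLoader(TensorDataset(features, labels), batch_size=32, shuffle=True)
model = nn.Sequential(nn.Linear(221, 7), nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()

data, target = next(iter(loader))
loss = criterion(model(data), target)   # output [32, 7], target [32]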
st32309
Yes, there was an additional dimension in the input and the target. I squeezed the input and converted the target with target.long(), and it worked. Thank you!
st32310
I have a similar error. I am posting it here because it is similar, and I tried this method but it's not working. I was trying to compute the cross entropy loss and I got this error: "Expected target size (1, 5), got torch.Size([1, 17451])". My cls_preds.size() is torch.Size([1, 17451, 5]) and cls_targets.size() is torch.Size([1, 17451]), and I was trying to calculate the loss with the following code:

loss = nn.CrossEntropyLoss()
output = loss(cls_preds, cls_targets)

Please provide some explanation, thank you.
st32311
nn.CrossEntropyLoss expects a model output in the shape [batch_size, nb_classes, *additional_dims] and a target in the shape [batch_size, *additional_dims] containing the class indices in the range [0, nb_classes-1]. Based on your output shape it seems you are dealing with 17451 classes and a temporal dimension of 5. The target should thus have a shape of [1, 5] (note the missing channel dimension). Also, it seems your target might be one-hot encoded, as it's using the class dimension in dim1, but is missing the temp. dim? Maybe your use case is vice versa and you are dealing with 5 classes. In this case, you would have to permute the output so that it has the shape [1, 5, 17451].
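For the 5-class case this would be (a sketch with random tensors of the shapes mentioned above):

import torch
import torch.nn as nn

cls_preds = torch.randn(1, 17451, 5)            # [batch, anchors, classes]
cls_targets = torch.randint(0, 5, (1, 17451))   # class indices in [0, 4]

loss = nn.CrossEntropyLoss()
output = loss(cls_preds.permute(0, 2, 1), cls_targets)   # preds -> [1, 5, 17451]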
st32312
Yes, you are right: my class_num = 5, the batch size is 1, and the other dimension is the number of anchors. I did cls_preds.permute(0,2,1) and my shape is now [1, 5, 17451]. Now I am getting an error saying "RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at /pytorch/aten/src/THNN/generic/SpatialClassNLLCriterion.c:111" in nn.nll_loss2d. Any help? Thank you.
st32313
If you are dealing with the shape [1, 5, 17451], I assume that each “anchor” belongs to one of the 5 classes? If so, then the target should contain class indices in the range [0, 4].
st32314
Actually, I tried using the number of anchor boxes as the batch dimension and it got solved. Does that make sense? Thanks anyway.
st32315
Hi, I face the same problem as Naveen. The shape of my output is torch.Size([1, 20, 4]), where 20 is the time sequence and 4 is the number of classes; softmax was used as the final activation function, and the loss is nn.CrossEntropyLoss(). I use the following expression:

loss = loss_func(output, torch.max(label, 2)[1])

I got this error: ValueError: Expected target size (2, 4), got torch.Size([2, 20]). But if I change my time sequence to 4, this error disappears. Why?
st32316
dbld_blqq: "Hi, I face the same problem as Naveen. The shape of my output is torch.Size([1, 20, 4]), where 20 is the time sequence and 4 is the number of classes"

That would be wrong, as explained here:

ptrblck: "nn.CrossEntropyLoss expects a model output in the shape [batch_size, nb_classes, *additional_dims] and a target in the shape [batch_size, *additional_dims] containing the class indices in the range [0, nb_classes-1]."

so you would have to permute the model output such that the class dimension is in dim1.

dbld_blqq: "softmax was used as the final activation function, and the loss is nn.CrossEntropyLoss()"

That is unfortunately also wrong, since nn.CrossEntropyLoss expects logits, so you would have to remove the softmax.
st32317
I want to create a model which takes two inputs:

a digit image from the MNIST dataset
a random number from 0-9

Output: the sum of the two numbers.

How do I create this model using PyTorch?
st32318
I am using __getitem__ to grab multiple embeddings from a file system and concat them into a single sample which is then sent to my dataloader for batching. The dataset is not very well sanitised and occasionally I will need to skip a sample. Is there a way to return and skip a sample, or iterate __getitem__ from within the method itself? Alternatively - should this code be put somewhere else? Thanks for any help. Ed
st32319
Solved by the-dharma-bum in post #2.
st32320
Could you provide a bit more information about your use case? Why can't you identify the indices that correspond to corrupted files, for instance during the dataset instantiation by looping once over all the files, or, even simpler, identify those files before the dataset instantiation? In your training loop, I assume you're doing something like:

for batch in dataloader:
    inputs, targets = batch
    outputs = model(inputs)
    ...

Can't you detect the batches to skip in this loop? If for some reason you can't do any of these propositions, you have the possibility to define a Sampler object that will be used by a DataLoader object to sample indices. Those indices will then be used to call the __getitem__ method of a Dataset object. Check the docs of torch.utils.data for more info. Perhaps in the __iter__ method of your sampler you could try to detect corrupted files and avoid yielding their indices, so that the __getitem__ method will never be called with the corresponding indices. I hope I'm being clear; feel free to ask for more details of course.
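A rough sketch of such a sampler (the is_valid_sample check is hypothetical and stands in for whatever sanity check fits your files):

import torch
from torch.utils.data import Sampler

class SkipCorruptedSampler(Sampler):
    def __init__(self, dataset, is_valid_sample):
        self.dataset = dataset
        self.is_valid_sample = is_valid_sample   # callable: index -> bool

    def __iter__(self):
        for idx in torch.randperm(len(self.dataset)).tolist():
            if self.is_valid_sample(idx):        # only yield indices that pass the check
                yield idx

    def __len__(self):
        return len(self.dataset)                 # upper bound; fewer indices may be yielded

# usage sketch: DataLoader(dataset, batch_size=8, sampler=SkipCorruptedSampler(dataset, check_fn))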
st32321
Thank you @the-dharma-bum for your reply. It seems that a custom sampler is exactly what I require in this case. This will allow me to check that all samples in the [idx] folder have been processed correctly before sending them to the dataloader. This is a great solution for me, as the dataset is being processed while I am experimenting with some model architectures. Identifying incomplete samples during init wouldn't be possible due to the dataset size. Thank you for your help!
st32322
Hi folks, I’m running some pytorch code on a few spare workstations at work. I’m hitting an issue where workstations report no cuda device seemingly randomly, and then seem to come good after a period of time. Out of a pool of maybe 30 workstations, around 10 will get this issue at any one time. From day to day (or hour by hour) different workstations are affected. At first I thought this was due to the display sleeping, but today I have observed it when a user is logged in and using the desktop environment. From searching, it seems this can happen when drivers are updated but the machine is not rebooted - this is not happening in my case. Kind of baffled, any help appreciated. I made an effort to find a solution in previous posts, but none of the answers seemed to apply to me. Could it be a mismatch in the CUDA version installed with the driver (11.2) and the version I’m using in my container (11.0)? cheers >>> torch.cuda.is_available() /opt/conda/envs/pytorch/lib/python3.7/site-packages/torch/cuda/__init__.py:52: UserWarning: CUDA initialization: CUDA unknown error - this may be due to an incorrectly set up environment, e.g. changing env variable CUDA_VISIBLE_DEVICES after program start. Setting the available devices to be zero. (Triggered internally at /opt/conda/conda-bld/pytorch_1607370156314/work/c10/cuda/CUDAFunctions.cpp:100.) return torch._C._cuda_getDeviceCount() > 0 Singularity> nvcc --version nvcc: NVIDIA (R) Cuda compiler driver Copyright (c) 2005-2020 NVIDIA Corporation Built on Wed_Jul_22_19:09:09_PDT_2020 Cuda compilation tools, release 11.0, V11.0.221 Build cuda_11.0_bu.TC445_37.28845127_0 Singularity> python Python 3.7.10 (default, Feb 26 2021, 18:47:35) [GCC 7.3.0] :: Anaconda, Inc. on linux Type "help", "copyright", "credits" or "license" for more information. >>> import torch >>> torch.__version__ '1.7.1' >>> torch.version.cuda '11.0' +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.67 Driver Version: 460.67 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Quadro M5000 Off | 00000000:03:00.0 On | Off | | 43% 49C P0 48W / 150W | 649MiB / 8126MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | 0 N/A N/A 17617 G /usr/bin/X 521MiB | | 0 N/A N/A 52214 G ...AAAAAAAAA= --shared-files 118MiB | +-----------------------------------------------------------------------------+
st32323
I doubt the issue is caused by the local CUDA toolkit installation and it is usually caused by a misconfiguration of the system. Since you’ve already eliminated driver updates and are seeing this issue randomly occurring and disappearing, it would be interesting to know what “state” the systems are in when the error happens. I.e. are these system in any kind of hibernate status etc.
st32324
Cheers. So we're unable to find any sort of pattern, unfortunately. We have taken PyTorch itself out of the equation by reproducing the CUDA availability issue in TensorFlow and also Blender (I put a forum post over at the Nvidia forums after this discovery). I can confirm the machines are not in any hibernate status. We've seen this error happen even when a user is logged into a Gnome session and using OpenGL/OpenCL software such as Autodesk Maya (nvidia-smi type C+G). Rebooting the affected machine seems to fix it. It's very weird: graphics work fine, but CUDA intermittently drops out.
st32325
even my indices is lower than embedding_dim-1 within a batch , i am still getting IndexError: index out of range in self , here is my data and code. In each batch for Age category size is 20 and input embedding size is 70 , dont know why indexing error is throwing data = pd.read_csv('Churn_Modelling.csv') print("Shape:", data.shape) data.head() X_train = data[['Age','Balance']] y_train = pd.DataFrame(data['Exited']) X_train Shape: (10000, 14) Age Balance ---- ------- 0 42 0.00 1 41 83807.86 2 42 159660.80 3 39 0.00 4 43 125510.82 10000 rows × 2 columns y_train Exited ------- 0 1 1 0 2 1 3 0 4 0 10000 rows × 1 columns features = ['Age'] for col in features: X_train.loc[:,col] = X_train.loc[:,col].astype('category') X_train.dtypes Age category Balance float64 dtype: object embedded_cols = {n: len(col.cat.categories) for n,col in X_train[features].items()} embedded_cols {'Age': 70} class ShelterOutcomeDataset(Dataset): def __init__(self, X, Y, embedded_col_names): X = X.copy() self.X1 = X.loc[:,embedded_col_names].copy().values.astype(np.int64) #categorical columns self.X2 = X.drop(columns=embedded_col_names).copy().values.astype(np.float32) #numerical columns self.y = Y.copy().values.astype(np.int64) def __len__(self): return len(self.y) def __getitem__(self, idx): return self.X1[idx], self.X2[idx], self.y[idx] embedding_sizes = [(n_categories, min(50, (n_categories+1)//2)) for _,n_categories in embedded_cols.items()] embedding_sizes [(70, 35)] train_ds = ShelterOutcomeDataset(X_train,y_train , ['Age']) class testNet(nn.Module): def __init__(self, emb_dims, n_cont): super().__init__() self.embeddings = nn.ModuleList([nn.Embedding(categories, size) for categories,size in emb_dims]) no_of_embs = sum(e.embedding_dim for e in self.embeddings) #length of all embeddings combined self.n_emb, self.n_cont = no_of_embs, n_cont self.lin1 = nn.Linear(self.n_emb + self.n_cont,6) self.lin2 = nn.Linear(6, 4) self.lin3 = nn.Linear(4, 2) self.bn1 = nn.BatchNorm1d(self.n_cont) self.bn2 = nn.BatchNorm1d(6) self.bn3 = nn.BatchNorm1d(4) self.emb_drop = nn.Dropout(0.6) self.drops = nn.Dropout(0.3) def forward(self, x_cat, x_cont): x = [e(x_cat[:,i]) for i,e in enumerate(self.embeddings)] x = torch.cat(x, 1) x = self.emb_drop(x) # batch normalization over continous features x2 = self.bn1(x_cont) # concatenate both embedding and continous feature , here 1 means dim # the dimension over which the tensors are concatenated we are concatenating columns x = torch.cat([x, x2], 1) x = F.relu(self.lin1(x)) x = self.drops(x) x = self.bn2(x) x = F.relu(self.lin2(x)) x = self.drops(x) x = self.bn3(x) x = self.lin3(x) return x import torch.nn as nn criterion = nn.CrossEntropyLoss() def train_model(model, optim, train_dl): model.train() total = 0 sum_loss = 0 for cat, cont, y in train_dl: batch = y.shape[0] print(cat.size()) # <--- size of features whihc has to be embeded y = y.to(torch.float32) output = model(cat, cont) _,pred = torch.max(output,1) loss = criterion(output, y.squeeze(1).long()) optim.zero_grad() loss.backward() optim.step() total += batch sum_loss += batch*(loss.item()) return sum_loss/total,pred def train_loop(model, epochs, lr=0.01, wd=0.0): optim = get_optimizer(model, lr = lr, wd = wd) for epoch in range(epochs): loss,pred = train_model(model, optim, train_dl) if (epoch+1) % 5 ==0: print(f'epoch : {epoch+1},training loss : {loss}, output : {output}') batch_size = 20 train_dl = DataLoader(train_ds, batch_size=batch_size,shuffle=True) #valid_dl = DataLoader(valid_ds, batch_size=batch_size,shuffle=True) 
train_dl = DeviceDataLoader(train_dl, device) # valid_dl = DeviceDataLoader(valid_dl, device) # model = ShelterOutcomeModel(embedding_sizes,0) model = testNet(embedding_sizes,1) print(model) from collections import defaultdict opt = torch.optim.Adam(model.parameters(), lr=1e-2) # to_device(model, device) train_loop(model, epochs=100, lr=0.01, wd=0.00001) testNet( (embeddings): ModuleList( (0): Embedding(70, 35) ) (lin1): Linear(in_features=36, out_features=6, bias=True) (lin2): Linear(in_features=6, out_features=4, bias=True) (lin3): Linear(in_features=4, out_features=2, bias=True) (bn1): BatchNorm1d(1, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (bn2): BatchNorm1d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (bn3): BatchNorm1d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (emb_drop): Dropout(p=0.6, inplace=False) (drops): Dropout(p=0.3, inplace=False) ) torch.Size([20, 1]) --------------------------------------------------------------------------- IndexError Traceback (most recent call last) <ipython-input-3281-888e52d4559c> in <module> 74 # to_device(model, device) 75 ---> 76 train_loop(model, epochs=100, lr=0.01, wd=0.00001) <ipython-input-3281-888e52d4559c> in train_loop(model, epochs, lr, wd) 46 optim = get_optimizer(model, lr = lr, wd = wd) 47 for epoch in range(epochs): ---> 48 loss,pred = train_model(model, optim, train_dl) 49 if (epoch+1) % 5 ==0: 50 print(f'epoch : {epoch+1},training loss : {loss}, output : {output}') <ipython-input-3281-888e52d4559c> in train_model(model, optim, train_dl) 15 16 ---> 17 output = model(cat, cont) 18 _,pred = torch.max(output,1) 19 ~/anaconda3/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs) 720 result = self._slow_forward(*input, **kwargs) 721 else: --> 722 result = self.forward(*input, **kwargs) 723 for hook in itertools.chain( 724 _global_forward_hooks.values(), <ipython-input-3280-681fc4d5712d> in forward(self, x_cat, x_cont) 30 31 ---> 32 x = [e(x_cat[:,i]) for i,e in enumerate(self.embeddings)] 33 x = torch.cat(x, 1) 34 <ipython-input-3280-681fc4d5712d> in <listcomp>(.0) 30 31 ---> 32 x = [e(x_cat[:,i]) for i,e in enumerate(self.embeddings)] 33 x = torch.cat(x, 1) 34 ~/anaconda3/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs) 720 result = self._slow_forward(*input, **kwargs) 721 else: --> 722 result = self.forward(*input, **kwargs) 723 for hook in itertools.chain( 724 _global_forward_hooks.values(), ~/anaconda3/lib/python3.8/site-packages/torch/nn/modules/sparse.py in forward(self, input) 122 123 def forward(self, input: Tensor) -> Tensor: --> 124 return F.embedding( 125 input, self.weight, self.padding_idx, self.max_norm, 126 self.norm_type, self.scale_grad_by_freq, self.sparse) ~/anaconda3/lib/python3.8/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse) 1812 # remove once script supports set_grad_enabled 1813 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type) -> 1814 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse) 1815 1816 IndexError: index out of range in self
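A likely cause (this is an assumption, not confirmed in the thread): nn.Embedding(70, 35) only accepts indices in [0, 69], but the dataset stores the raw Age values, and with 70 distinct ages some of them will be larger than 69, which makes the lookup fail even though the batch size is only 20. A minimal sketch of that kind of fix would be to store the category codes instead of the raw values before building the dataset:

for col in features:
    X_train.loc[:, col] = X_train.loc[:, col].astype('category').cat.codes
# X_train['Age'] now holds integers in [0, 69], which are valid indices for nn.Embedding(70, 35)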
st32326
I am confused about how to save a model so that I can resume training later. I have a function

def fit(model, train, val):
    .....
    .....
    return loss

I configure the model as follows:

model = MyModel().to(device)
criterion = nn.BCEWithLogitsLoss()
opt = torch.optim.AdamW(params=model.parameters(), lr=0.001)

Then I call the function as loss = fit(model, train, val). Do I need to return the optimizer and model from my fit function in order to save a checkpoint as follows?

state = {
    'epoch': epoch,
    'state_dict': model.state_dict(),
    'optimizer': opt.state_dict(),
}
savepath = '0to5.pt'
torch.save(state, savepath)
st32327
Sorry, it's not clear to me. I am not sure whether the model variable outside the function is updated, or whether we need to return the model object.
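A minimal sketch of how this usually works (assuming fit calls opt.step() on model's parameters, in which case the model object you passed in is modified in place and nothing needs to be returned):

loss = fit(model, train, val)          # model's parameters are updated in place

state = {
    'epoch': epoch,
    'state_dict': model.state_dict(),
    'optimizer': opt.state_dict(),
}
torch.save(state, '0to5.pt')

# later, to resume training
checkpoint = torch.load('0to5.pt')
model.load_state_dict(checkpoint['state_dict'])
opt.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1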
st32328
Suppose I need to add an L2 regularization term to my loss, and it is calculated like below:

regularizer = torch.tensor(0.)
for name, param in model.named_parameters():
    if 'weight' in name:
        regularizer += torch.sum(torch.norm(param, dim=0))

I have two questions. First, for the initialization of regularizer, do I need to set requires_grad=True, i.e.

regularizer = torch.tensor(0., requires_grad=True)

Second, is the in-place addition appropriate here? Do I need to modify it to

regularizer = regularizer + torch.sum(torch.norm(param, dim=0))
st32329
No, you don't need to use the requires_grad attribute and could even create the initial regularizer variable as a Python scalar: regularizer = 0. As long as Autograd doesn't raise an error, the in-place operation should work fine. In case you are concerned about scripting the model (and the loss calculation), you could check whether removing the in-place ops would allow more operator fusions, but that might be an edge case.
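A minimal sketch of that suggestion (model, criterion, inputs, targets and the 1e-4 weight are placeholders, not taken from the original post):

reg = 0.  # plain Python scalar; it becomes a tensor after the first addition, so autograd tracks it
for name, param in model.named_parameters():
    if 'weight' in name:
        reg = reg + param.norm(dim=0).sum()

loss = criterion(model(inputs), targets) + 1e-4 * reg
loss.backward()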
st32330
I'm trying to get more familiar with ATen by playing around with it directly. I would like to run the examples from the cppdocs, for example:

#include <ATen/ATen.h>

at::Tensor a = at::ones({2, 2}, at::kInt);
at::Tensor b = at::randn({2, 2});
auto c = a + b.to(at::kInt);

I'm not super familiar with C++, so what I did was create a new C++ project my_project such that I get the following folder structure:

+ pytorch/
|
+ my_project/

and in my_project/ create a main.cpp:

#include "../pytorch/aten/src/ATen/ATen.h"

int main() {
  at::Tensor a = at::ones({2, 2}, at::kInt);
  at::Tensor b = at::randn({2, 2});
  auto c = a + b.to(at::kInt);
  return 0;
}

The include seems to be successful in that it finds the folder; however, when I try to build my_project I get the following error:

In file included from .../aten_debug/main.cpp:2:
.../aten_debug/../pytorch/aten/src/ATen/ATen.h:7:10: fatal error: 'c10/core/Allocator.h' file not found

What do I have to do to get this to work? Please note that PyTorch isn't built, it's just the repository, since I'm working on a laptop. I do have the possibility to use a remote server with built PyTorch, but I'm not sure whether this is the problem or just some trivial misunderstanding due to my lacking C++ knowledge.
st32331
You could follow this tutorial, which explains how to write the CMakeLists.txt, set up the general project, and build it, since it seems you are trying to use a manual setup which is missing some include paths.
st32332
I have a neural network that has multiple output layers for softmax probability depending on the state my agent is in. Here is an example of my network: class policy(nn.Module): def __init__(self): hidden_layer = 32 super(policy, self).__init__() self.affine1 = nn.Linear(3, hidden_layer) self.affine2 = nn.Linear(hidden_layer, hidden_layer) self.output1 = nn.Linear(hidden_layer, 10) self.output2 = nn.Linear(hidden_layer, 5) self.output3 = nn.Linear(hidden_layer, 3) def forward(self, x): x = torch.nn.functional.relu(self.affine1(x)) x = torch.nn.functional.relu(self.affine2(x)) outputprobs1 = torch.nn.functional.softmax(self.output1(x), dim=-1) outputprobs2 = torch.nn.functional.softmax(self.output2(x), dim=-1) outputprobs3 = torch.nn.functional.softmax(self.output3(x), dim=-1) return outputprobs1, outputprobs2, outputprobs3 The softmax probabilities indicate a certain action my agent will perform, but the agent has different actions based on different states. Because I know which actions my agent should perform, I want to train the policy through supervised learning. I am planning to use torch.nn.CrossEntropyLoss(), as it is a multi-classification problem. Moreover, during each episode, the model chooses from the output probabilities multiple times. For example, let’s say that there are 3 states my agent can be in, A, B, and C. In state A, the agent uses output1, in state B, the agent uses output2, and in state C, the agent uses output 3. As a result, one example through an episode might be: Agent starts in state A: chooses action 9 Now agent is in state C: chooses action 2 Now agent is in state B: chooses action 4 Now agent is in state A: chooses action 5 Here are my following questions: How would I train this policy through supervised learning? The website for CrossEntropyLoss says I need a input of shape (N, C). So my C here would be 10, 5, and 3 respectively. My thinking is that I would need 3 of these inputs, one for each output layer. As a result, would I need a separate loss function for each output layer, or can I just use one loss function for all of them? Would propagating the loss for one of the output layers affect the others negatively through supervised learning? Moreover, I would like more clarification on how to obtain the logits needed for the input to CrossEntropyLoss. In order to obtain this, would I use torch.logit(outputprobs1)? Any help with this is much appreciated, thank you!
st32333
What does the x that you pass to your forward function represent? As I mentioned here, one loss function is enough; it's just one function.
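Regarding the original questions, a minimal sketch of one way to combine the three heads (the batch size of 8 and the random targets are placeholders). Note that nn.CrossEntropyLoss expects raw logits and applies log-softmax internally, so the heads would return self.output1(x) etc. without the softmax:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()                   # one instance is enough for all heads

logits1 = torch.randn(8, 10, requires_grad=True)    # head used in state A
logits2 = torch.randn(8, 5, requires_grad=True)     # head used in state B
logits3 = torch.randn(8, 3, requires_grad=True)     # head used in state C
target1 = torch.randint(0, 10, (8,))
target2 = torch.randint(0, 5, (8,))
target3 = torch.randint(0, 3, (8,))

loss = (criterion(logits1, target1)
        + criterion(logits2, target2)
        + criterion(logits3, target3))
loss.backward()                                     # one backward pass produces gradients for all heads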
st32334
Hi, is there a PyTorch equivalent for the following and similar options? https://docs.nvidia.com/deeplearning/frameworks/tensorflow-user-guide/index.html#tf_enable_winograd_nonfused I wonder because in TF it is integrated into the framework, but maybe it is possible to do the same through the cuDNN dynamic library?
st32335
No, currently you cannot disable specific cudnn algorithms manually. We are working on the enablement of the cudnn v8 API, which allows more flexible filtering.
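For reference, the switches that are currently exposed operate on cuDNN globally rather than per algorithm (a sketch of what exists today, not a substitute for per-algorithm filtering):

import torch

torch.backends.cudnn.enabled = True         # set to False to bypass cuDNN entirely
torch.backends.cudnn.benchmark = True       # benchmark and cache the fastest algorithm per input shape
torch.backends.cudnn.deterministic = False  # True restricts cuDNN to deterministic algorithms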
st32336
I have the following model architecture, which essentially is a 5 layer LSTM that takes in 62 length strings and outputs classification predictions based on that. Because of how the data works, the first 3-5 characters are more important for the classification than the remainder of the strings. How do I get the model to place more weight on the first three characters??? #basic model, need to modify to situation class NLP_model(nn.Module): def __init__(self, vocab_size, embedding_dim, hidden_dim, num_classes): super(NLP_model, self).__init__() self.char_embedding = nn.Embedding(vocab_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers = 5, bidirectional= True) self.fc = nn.Linear(hidden_dim*2,num_classes) #self.att = nn.MultiheadAttention(embed_dim, num_heads, ...) def forward(self, x): x = self.char_embedding(x) output, hidden = self.lstm(x) hidden = torch.cat((hidden[0][-2,:,:], hidden[0][-1,:,:]), dim=1) x = self.fc(hidden[0]) return x #modify for os model_os = NLP_model(len(alphabet), 12, 24, lenos) #Find the number of fismaids for last variable criterion = nn.BCEWithLogitsLoss() optimizer = optim.Adam(model_os.parameters(), lr = 0.001) for epoch in range(10): #Look into DataLoader for batch processing y = list() z = list() for sentence, label in zip(ls_X_train_os, ls_y_train_os): #training_data should be an array of hostnames and labels model_os.zero_grad() output = model_os(sentence_to_id(sentence)) #sentence is the hostname, label is the fismaid #print(label) #print(output.shape) temp_label = label label = torch.zeros(lenos) label[temp_label] = 1.0 #label = torch.tensor(label).unsqueeze(0) #label = torch.tensor([label]).unsqueeze(1) #label = torch.tensor(label).unsqueeze(1) #print(label.shape) loss = criterion(output, label) loss.backward() optimizer.step() y.append(loss.item()) y_true = [] y_pred = [] model_os.eval() for sentence, label in zip(ls_X_test_os, ls_y_test_os): temp_label = label label = torch.zeros(lenos) label[temp_label] = 1.0 output = model_os(sentence_to_id(sentence)) loss = criterion(output, label) z.append(loss.item()) pred = output.detach().numpy pred = np.argmax(pred) y_pred.append(pred) y_true.append(temp_label) print(f'epoch {epoch} training loss: {np.array(y).mean()}') print(f'testing loss : {np.array(z).mean()}') print(f'recall: {recall_score(y_true, y_pred, average="weighted")}') print(f'precision: {precision_score(y_true, y_pred, average="weighted")}') print(f' f1: {f1_score(y_true, y_pred, average="weighted")}') print(f'accuracy: {accuracy_score(y_true, y_pred)}')
st32337
Solved by ptrblck in post #4 (see the full reply below).
st32338
I assume that the model output and target tensors contain a temporal dimension, which represents the logits and targets for each time step, respectively? If so, you could create the unreduced loss by specifying reduction='none' while creating the criterion, and weight the loss using a custom weight tensor indicating the “importance” for each time step. Afterwards you could reduce the loss (e.g. by using the mean) and calculate the gradients via the backward operation.
st32339
The output is an array of probabilities for each category, from which I select the highest one to find the predicted outcome. I’m new to data science, so I’m not sure if that is what you mean by logits and targets for each time step. It certainly is a group of probabilities, or logit, but I’m not certain about the time-step. Is there an example of using a custom weight tensor I can look to?
st32340
The workflow to get the predictions sounds correct for the inference use case, but I assume won’t be used to train the model. Here is a small example of what I had in mind: # setup batch_size, nb_classes, seq_len = 2, 3, 4 output = torch.randn(batch_size, nb_classes, seq_len, requires_grad=True) target = torch.randint(0, 2, (batch_size, nb_classes, seq_len)).float() criterion = nn.BCEWithLogitsLoss(reduction='none') # create decreasing weight weights = torch.arange(seq_len, 0, -1).view(1, 1, seq_len).float() # calculate loss loss = criterion(output, target) # apply weights loss = loss * weights # reduce and calculate gradients loss.mean().backward()
st32341
class UNet_down_block(torch.nn.Module): def __init__(self, input_channel, output_channel, down_size): super(UNet_down_block, self).__init__() self.conv1 = torch.nn.Conv2d(input_channel, output_channel, 3, padding=1) self.bn1 = torch.nn.BatchNorm2d(output_channel) self.conv2 = torch.nn.Conv2d(output_channel, output_channel, 3, padding=1) self.bn2 = torch.nn.BatchNorm2d(output_channel) self.conv3 = torch.nn.Conv2d(output_channel, output_channel, 3, padding=1) self.bn3 = torch.nn.BatchNorm2d(output_channel) self.max_pool = torch.nn.MaxPool2d(2, 2) self.relu = torch.nn.ReLU() self.down_size = down_size def forward(self, x): if self.down_size: x = self.max_pool(x) x = self.relu(self.bn1(self.conv1(x))) x = self.relu(self.bn2(self.conv2(x))) x = self.relu(self.bn3(self.conv3(x))) return x class UNet_up_block(torch.nn.Module): def __init__(self, prev_channel, input_channel, output_channel): super(UNet_up_block, self).__init__() self.up_sampling = torch.nn.Upsample(scale_factor=2, mode='bilinear') self.conv1 = torch.nn.Conv2d(prev_channel + input_channel, output_channel, 3, padding=1) self.bn1 = torch.nn.BatchNorm2d(output_channel) self.conv2 = torch.nn.Conv2d(output_channel, output_channel, 3, padding=1) self.bn2 = torch.nn.BatchNorm2d(output_channel) self.conv3 = torch.nn.Conv2d(output_channel, output_channel, 3, padding=1) self.bn3 = torch.nn.BatchNorm2d(output_channel) self.relu = torch.nn.ReLU() def forward(self, prev_feature_map, x): x = self.up_sampling(x) x = torch.cat((x, prev_feature_map), dim=1) x = self.relu(self.bn1(self.conv1(x))) x = self.relu(self.bn2(self.conv2(x))) x = self.relu(self.bn3(self.conv3(x))) return x class UNet(torch.nn.Module): def __init__(self): super(UNet, self).__init__() self.down_block1 = UNet_down_block(1, 16, False) self.down_block2 = UNet_down_block(16, 32, True) self.down_block3 = UNet_down_block(32, 64, True) self.down_block4 = UNet_down_block(64, 128, True) self.down_block5 = UNet_down_block(128, 256, True) self.down_block6 = UNet_down_block(256, 512, True) self.down_block7 = UNet_down_block(512, 1024, True) self.mid_conv1 = torch.nn.Conv2d(1024, 1024, 3, padding=1) self.bn1 = torch.nn.BatchNorm2d(1024) self.mid_conv2 = torch.nn.Conv2d(1024, 1024, 3, padding=1) self.bn2 = torch.nn.BatchNorm2d(1024) self.mid_conv3 = torch.nn.Conv2d(1024, 1024, 3, padding=1) self.bn3 = torch.nn.BatchNorm2d(1024) self.up_block1 = UNet_up_block(512, 1024,512) self.up_block2 = UNet_up_block(256, 512, 256) self.up_block3 = UNet_up_block(128, 256, 128) self.up_block4 = UNet_up_block(64, 128, 64) self.up_block5 = UNet_up_block(32, 64, 32) self.up_block6 = UNet_up_block(16, 32, 16) self.last_conv1 = torch.nn.Conv2d(16, 16, 3, padding=1) self.last_bn = torch.nn.BatchNorm2d(16) self.last_conv2 = torch.nn.Conv2d(16, 1, 1, padding=0) self.relu = torch.nn.ReLU() def forward(self, x): self.x1 = self.down_block1(x) self.x2 = self.down_block2(self.x1) self.x3 = self.down_block3(self.x2) self.x4 = self.down_block4(self.x3) self.x5 = self.down_block5(self.x4) self.x6 = self.down_block6(self.x5) self.x7 = self.down_block7(self.x6) self.x7 = self.relu(self.bn1(self.mid_conv1(self.x7))) self.x7 = self.relu(self.bn2(self.mid_conv2(self.x7))) self.x7 = self.relu(self.bn3(self.mid_conv3(self.x7))) x = self.up_block1(self.x6, self.x7) x = self.up_block2(self.x5, x) x = self.up_block3(self.x4, x) x = self.up_block4(self.x3, x) x = self.up_block5(self.x2, x) x = self.up_block6(self.x1, x) x = self.relu(self.last_bn(self.last_conv1(x))) x = self.last_conv2(x) return x RuntimeError: Given groups=1, 
weight of size [32, 16, 3, 3], expected input[4592, 1, 10, 10] to have 16 channels, but got 1 channels instead. Can someone help me, please?
st32342
The indentation of the down block's forward is broken: you want to dedent the conv calls so that they are not nested inside the if self.down_size: branch. A bit of unsolicited advice: I would advise against assigning to self.xsomething in forward; use local variables instead, unless you have very good reasons to need them after the forward has completed.
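For illustration, a sketch of the down block's forward with the conv calls dedented (so only the pooling is conditional) and with local variables only:

def forward(self, x):
    if self.down_size:
        x = self.max_pool(x)
    x = self.relu(self.bn1(self.conv1(x)))
    x = self.relu(self.bn2(self.conv2(x)))
    x = self.relu(self.bn3(self.conv3(x)))
    return x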
st32343
RuntimeError: The size of tensor a (2) must match the size of tensor b (3332) at non-singleton dimension 3
st32344
What are you giving as input to your model? Besides the indentation error pointed out by tom above, your code is correct. The input must be a 4-dimensional tensor of shape (batch_size, channels, height, width). For instance:

dummy_input = torch.randn(1, 1, 256, 256)
model = UNet()
output = model(dummy_input)
print(output.size())  # torch.Size([1, 1, 256, 256])

With the indentation fixed, I tried this and it worked.
st32345
using this code class DiscriminatorNet(torch.nn.Module): """ A three hidden-layer discriminative neural network """ def __init__(self): super(DiscriminatorNet, self).__init__() n_features = 40 n_out = 1 self.hidden0 = nn.Sequential( nn.Linear(n_features, 1024), nn.LeakyReLU(0.2), nn.Dropout(0.3) ) self.hidden1 = nn.Sequential( nn.Linear(1024, 512), nn.LeakyReLU(0.2), nn.Dropout(0.3) ) self.hidden2 = nn.Sequential( nn.Linear(512, 256), nn.LeakyReLU(0.2), nn.Dropout(0.3) ) self.out = nn.Sequential( torch.nn.Linear(256, n_out), torch.nn.Sigmoid() ) def forward(self, x): x = self.hidden0(x) x = self.hidden1(x) x = self.hidden2(x) x = self.out(x) return x ValueError: Using a target size (torch.Size([2604, 1])) that is different to the input size (torch.Size([2604, 1, 40, 1])) is deprecated. Please ensure they have the same size.
st32346
I'm not sure what you're trying to do. I assume your error comes from a call to some criterion. If you want to classify 40 features and you have 2604 samples, your input tensor should be of size (2604, 40). For instance, the following code works:

criterion = nn.BCEWithLogitsLoss()

# 2604 1D vectors of length 40
dummy_input = torch.randn(2604, 40)

# 2604 integers in [0, 1], cast to float
dummy_target = torch.randint(2, (2604, 1)).float()

model = DiscriminatorNet()
output = model(dummy_input)
print(output.size())        # torch.Size([2604, 1])
print(dummy_target.size())  # torch.Size([2604, 1])

loss = criterion(output, dummy_target)
print(isinstance(loss.item(), float))  # True

I can only suggest that you identify which torch function actually causes the error you face and check the relevant PyTorch documentation.
st32347
[screenshot 848×418] Help me please, @the-dharma-bum. Here is the code:

with torch.no_grad():
    output1 = model(train_x.float())
    softmax = torch.exp(output1).cpu()
    prob1 = list(softmax.numpy())
    predictions = np.argmax(prob1, axis=1)
print('Validation accuracy train: {:.4f}%'.format(float(accuracy_score(train_y, predictions, normalize=False) * 100)))
st32348
Hard to say what the error is here without knowing what train_y and output1 are. The following sample code may help you:

batch_size = 64
num_classes = 2
dummy_outputs = torch.randn(batch_size, num_classes)
dummy_targets = torch.randint(num_classes, size=(batch_size, ))

predictions = dummy_outputs.softmax(dim=1).argmax(dim=1).numpy()
targets = dummy_targets.numpy()

accuracy = accuracy_score(targets, predictions)
print(accuracy)  # around 0.5 since both predictions and targets are random
st32349
def train(epoch): model.train() tr_loss = 0 x_train, y_train = Variable(train_x), Variable(train_y) x_val, y_val = Variable(val_x), Variable(val_y) if torch.cuda.is_available(): x_train = x_train.cuda() y_train = y_train.cuda() x_val = x_val.cuda() y_val = y_val.cuda() optimizer.zero_grad() output_train = model(x_train.float()) output_val = model(x_val.float()) output_train = torch.randn(2604, 1) y_train = torch.randint(2, (2604, 1)).float() loss_train = criterion(output_train, y_train) output_val = torch.randn(2604, 1) y_val = torch.randint(2, (2604, 1)).float() loss_val = criterion(output_val, y_val) train_losses.append(loss_train) val_losses.append(loss_val) with torch.no_grad(): output1 = model((train_x.float())) softmax1 = torch.exp(output1).cpu() prob1 = list(softmax1.numpy()) predictions1 = np.argmax(prob1, axis=1) predictions1 = predictions1.argmax() train_y = train_y.numpy() print('Validation accuracy train: {:.4f}%'.format(float(accuracy_score(train_y, predictions1) * 100))) @the-dharma-bum error
st32350
I’m building a simple network that takes in two numbers and learns how to add them. import torch add1= torch.randint(0,9,size=[6000]) add2= torch.randint(0,9,size=[6000]) add_sum = add1 + add2 This a pretty simple network from torch import nn from torch.nn import functional as F class Net(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(2,20) self.linear2 = nn.Linear(20,1) def forward(self,x1,x2): inp = torch.cat((x1[None],x2[None])).float() out = self.linear1(inp) out = F.relu(out) out = self.linear2(out) return out Here’s the training loop net = Net() optim = torch.optim.AdamW(net.parameters(),lr=0.1) criterion = nn.MSELoss() for i in range(len(add1)): out = net(add1[i],add2[i]) loss = criterion(out,add_sum[i].float()) optim.zero_grad() loss.backward() optim.step() if i%500==0: print(loss) Now the input here doesn’t have batch size, yet it works. But sometimes pytorch inference doesn’t work without a batch size. Why is that? If I’m trying to input a vector of n features to an NN that starts with a linear layer should be the shape (n,) or (n,1) or (1,n) ? Pretty confused about that here. Is the way I’m handling the input the right way? Or is there a better way to do it?
st32351
Solved by ptrblck in post #3 (see the full reply below).
st32352
Essentially a broader question would be, is there a guide on the shapes and types the tensors have to be for different models and loss functions?
st32353
Yes, the docs mention the expected shape for each layer and I would stick to it. My general rule is that a batch dimension is expected in nn.Modules. While e.g. linear layers could work with an input having a single dimension, you would have to verify what's applied internally (which dimension is broadcast, etc.), so I would prefer to use the documented approach.
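As an illustration of the batch dimension (the layer sizes here just mirror the toy addition network above):

import torch
import torch.nn as nn

layer = nn.Linear(2, 20)

single = torch.tensor([3., 4.])   # one sample with 2 features, shape (2,)
batched = single.unsqueeze(0)     # shape (1, 2): explicit batch dimension of 1
print(layer(batched).shape)       # torch.Size([1, 20])

batch = torch.randn(16, 2)        # a batch of 16 samples
print(layer(batch).shape)         # torch.Size([16, 20])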
st32354
Hello, I implemented depth warping just using inverse_warp.py from DPSNet for a 480×640 image and its depth map. I want to create a binary map with the target coordinates of the depth warping set to 1 and the other coordinates set to 0. In other words, it displays the occlusion-exposed background as 0 and the non-occluded regions as 1. (What I want is not an occlusion mask, but it represents occlusion by marking the region that was occluded as 0.)

src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode)  # [B,H,W,2]
projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords, padding_mode=padding_mode)
return projected_feat, src_pixel_coords

To achieve this, I used inverse_warp.py to return src_pixel_coords (i.e. the target coordinates), which is passed as an argument to the torch.nn.functional.grid_sample() function. From this src_pixel_coords value, I can produce the binary map I desire with the following numpy code:

warped_img, src_pixel_coords = inverse_warp( ... )
img_height = 480
img_width = 640
coords = (src_pixel_coords + 1) / 2  # normalize to 0-1
x_coords = coords[0,:,:,0] * img_width  # batch_size = 1
y_coords = coords[0,:,:,1] * img_height
x_coords = x_coords.floor().clamp(0, img_width-1).cpu().numpy()
y_coords = y_coords.floor().clamp(0, img_height-1).cpu().numpy()
mask = torch.zeros(img_height, img_width)
for y in range(img_height):
    for x in range(img_width):
        y_ = np.clip(2*y - int(y_coords[y,x]), 0, img_height-1)
        x_ = np.clip(2*x - int(x_coords[y,x]), 0, img_width-1)
        mask[y_, x_] = 1

And the output: it works properly and produces the binary map well. However, there are several problems with this code. It is implemented in numpy, therefore it can't be computed on the GPU. It doesn't work when the batch size is greater than 1. It has bad time complexity. I would like to implement this numpy code as functions on a PyTorch tensor and solve the above three problems. However, I am not sure how to do it. Can you help?
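One possible vectorized sketch of the same loop (an assumption of how it could be done, not tested against the original data): keep the coordinates as tensors and replace the double Python loop with advanced indexing, which also runs on the GPU.

H, W = 480, 640
x_coords = (coords[0, :, :, 0] * W).floor().clamp(0, W - 1).long()
y_coords = (coords[0, :, :, 1] * H).floor().clamp(0, H - 1).long()

ys = torch.arange(H, device=x_coords.device).view(H, 1).expand(H, W)
xs = torch.arange(W, device=x_coords.device).view(1, W).expand(H, W)

y_ = (2 * ys - y_coords).clamp(0, H - 1)
x_ = (2 * xs - x_coords).clamp(0, W - 1)

mask = torch.zeros(H, W, device=x_coords.device)
mask[y_.reshape(-1), x_.reshape(-1)] = 1  # replaces the per-pixel Python loop

For batch sizes greater than one, the same indexing could be repeated per sample, or the batch index could be folded into a flattened mask with per-sample offsets.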
st32355
One of the data processing step in my model uses a FFT and/or IFFT to an arbitrary tensor. Things works nicely as long as I kept the dimension of the tensor small. But, once it gets to a certain size, FFT and IFFT ran on GPU won’t spit out values similar to CPU. a = torch.load('H_fft_2000.pt') b = a.clone().cuda() print(f'a.shape : {a.shape}') print(f'b.shape : {b.shape}') print(f'a.device : {a.device}') print(f'a.real Max(): {a.real.max()}, Min(): {a.real.min()}') print(f'a.imag Max(): {a.imag.max()}, Min(): {a.imag.min()}') print(f'a.abs() Max(): {a.abs().max()}, Min(): {a.abs().min()}') print(f'b.device : {b.device}') print(f'b.real Max(): {b.real.max()}, Min(): {b.real.min()}') print(f'b.imag Max(): {b.imag.max()}, Min(): {b.imag.min()}') print(f'b.abs() Max(): {b.abs().max()}, Min(): {b.abs().min()}') a_fft = torch.fft.fftn(a, dim=[-2,-1], norm="ortho") b_fft = torch.fft.fftn(b, dim=[-2,-1], norm="ortho") print(f'a_fft.device : {a_fft.device}') print(f'a_fft.real Max(): {a_fft.real.max()}, Min(): {a_fft.real.min()}') print(f'a_fft.imag Max(): {a_fft.imag.max()}, Min(): {a_fft.imag.min()}') print(f'a_fft.abs() Max(): {a_fft.abs().max()}, Min(): {a_fft.abs().min()}') print(f'b_fft.device : {b_fft.device}') print(f'b_fft.real Max(): {b_fft.real.max()}, Min(): {b_fft.real.min()}') print(f'b_fft.imag Max(): {b_fft.imag.max()}, Min(): {b_fft.imag.min()}') print(f'b_fft.abs() Max(): {b_fft.abs().max()}, Min(): {b_fft.abs().min()}') a_ifft = torch.fft.ifftn(a_fft, dim=[-2,-1], norm="ortho") b_ifft = torch.fft.ifftn(b_fft, dim=[-2,-1], norm="ortho") print(f'a_ifft.device : {a_ifft.device}') print(f'a_ifft.real Max(): {a_ifft.real.max()}, Min(): {a_ifft.real.min()}') print(f'a_ifft.imag Max(): {a_ifft.imag.max()}, Min(): {a_ifft.imag.min()}') print(f'a_ifft.abs() Max(): {a_ifft.abs().max()}, Min(): {a_ifft.abs().min()}') print(f'b_ifft.device : {b_ifft.device}') print(f'b_ifft.real Max(): {b_ifft.real.max()}, Min(): {b_ifft.real.min()}') print(f'b_ifft.imag Max(): {b_ifft.imag.max()}, Min(): {b_ifft.imag.min()}') print(f'b_ifft.abs() Max(): {b_ifft.abs().max()}, Min(): {b_ifft.abs().min()}') Running the code abover with an arbitrary tensor (H_fft_2000.pt) give me OK results regardless of the normalization mode if torch.fft.fftn/ifftn(). 
a.shape : torch.Size([3999, 3999]) b.shape : torch.Size([3999, 3999]) a.device : cpu a.real Max(): 1227899666432.0, Min(): -1286383861760.0 a.imag Max(): 1345065058304.0, Min(): -1391184314368.0 a.abs() Max(): 1392092512256.0, Min(): 3149313.0 b.device : cuda:0 b.real Max(): 1227899666432.0, Min(): -1286383861760.0 b.imag Max(): 1345065058304.0, Min(): -1391184314368.0 b.abs() Max(): 1392092512256.0, Min(): 3149313.0 a_fft.device : cpu a_fft.real Max(): 63194193920.0, Min(): -63188475904.0 a_fft.imag Max(): 63191957504.0, Min(): -63188336640.0 a_fft.abs() Max(): 63195369472.0, Min(): 63144849408.0 b_fft.device : cuda:0 b_fft.real Max(): 63194198016.0, Min(): -63188475904.0 b_fft.imag Max(): 63191961600.0, Min(): -63188340736.0 b_fft.abs() Max(): 63195381760.0, Min(): 63144853504.0 a_ifft.device : cpu a_ifft.real Max(): 1227899928576.0, Min(): -1286384123904.0 a_ifft.imag Max(): 1345065189376.0, Min(): -1391184576512.0 a_ifft.abs() Max(): 1392092774400.0, Min(): 3148201.0 b_ifft.device : cuda:0 b_ifft.real Max(): 1227899928576.0, Min(): -1286384123904.0 b_ifft.imag Max(): 1345065189376.0, Min(): -1391184445440.0 b_ifft.abs() Max(): 1392092774400.0, Min(): 3139220.5 However, once the input tensor gets larger… (H_fft_2000.pt → H_fft_3000.pt) a = torch.load('H_fft_3000.pt') b = a.clone().cuda() ... a_fft = torch.fft.fftn(a, dim=[-2,-1], norm="forward") b_fft = torch.fft.fftn(b, dim=[-2,-1], norm="forward") ... a_ifft = torch.fft.ifftn(a_fft, dim=[-2,-1], norm="forward") b_ifft = torch.fft.ifftn(b_fft, dim=[-2,-1], norm="forward") ... Running the code abover with an arbitrary tensor (H_fft_3000.pt), and a “forward” normalization won’t give me OK results on both torch.fft.fftn/ifftn(). a.shape : torch.Size([5999, 5999]) b.shape : torch.Size([5999, 5999]) a.device : cpu a.real Max(): 1317886492672.0, Min(): -1305796542464.0 a.imag Max(): 1357465518080.0, Min(): -1370446233600.0 a.abs() Max(): 1387183996928.0, Min(): 554983.6875 b.device : cuda:0 b.real Max(): 1317886492672.0, Min(): -1305796542464.0 b.imag Max(): 1357465518080.0, Min(): -1370446233600.0 b.abs() Max(): 1387183996928.0, Min(): 554983.6875 a_fft.device : cpu a_fft.real Max(): 15802503.0, Min(): -15801069.0 a_fft.imag Max(): 15801941.0, Min(): -15801037.0 a_fft.abs() Max(): 15802796.0, Min(): 15774392.0 b_fft.device : cuda:0 b_fft.real Max(): 38545.73828125, Min(): 0.015421354211866856 b_fft.imag Max(): 0.0, Min(): 0.0 b_fft.abs() Max(): 38545.73828125, Min(): 0.015421354211866856 a_ifft.device : cpu a_ifft.real Max(): 1317886230528.0, Min(): -1305796411392.0 a_ifft.imag Max(): 1357465124864.0, Min(): -1370445971456.0 a_ifft.abs() Max(): 1387183603712.0, Min(): 543693.625 b_ifft.device : cuda:0 b_ifft.real Max(): 38545.73828125, Min(): 0.015421354211866856 b_ifft.imag Max(): 0.0, Min(): 0.0 b_ifft.abs() Max(): 38545.73828125, Min(): 0.015421354211866856 If I change the normalization to “ortho”, then the error only occurs for ifftn(). a = torch.load('H_fft_3000.pt') b = a.clone().cuda() ... a_fft = torch.fft.fftn(a, dim=[-2,-1], norm="ortho") b_fft = torch.fft.fftn(b, dim=[-2,-1], norm="ortho") ... a_ifft = torch.fft.ifftn(a_fft, dim=[-2,-1], norm="ortho") b_ifft = torch.fft.ifftn(b_fft, dim=[-2,-1], norm="ortho") ... 
The result of the code above is… a.shape : torch.Size([5999, 5999]) b.shape : torch.Size([5999, 5999]) a.device : cpu a.real Max(): 1317886492672.0, Min(): -1305796542464.0 a.imag Max(): 1357465518080.0, Min(): -1370446233600.0 a.abs() Max(): 1387183996928.0, Min(): 554983.6875 b.device : cuda:0 b.real Max(): 1317886492672.0, Min(): -1305796542464.0 b.imag Max(): 1357465518080.0, Min(): -1370446233600.0 b.abs() Max(): 1387183996928.0, Min(): 554983.6875 a_fft.device : cpu a_fft.real Max(): 94799216640.0, Min(): -94790606848.0 a_fft.imag Max(): 94795833344.0, Min(): -94790393856.0 a_fft.abs() Max(): 94800961536.0, Min(): 94630592512.0 b_fft.device : cuda:0 b_fft.real Max(): 94799273984.0, Min(): -94790639616.0 b_fft.imag Max(): 94795882496.0, Min(): -94790459392.0 b_fft.abs() Max(): 94801018880.0, Min(): 94630559744.0 a_ifft.device : cpu a_ifft.real Max(): 1317886099456.0, Min(): -1305796280320.0 a_ifft.imag Max(): 1357464993792.0, Min(): -1370445840384.0 a_ifft.abs() Max(): 1387183341568.0, Min(): 552186.8125 b_ifft.device : cuda:0 b_ifft.real Max(): 15802803.0, Min(): 15774389.0 b_ifft.imag Max(): 0.0, Min(): 0.0 b_ifft.abs() Max(): 15802803.0, Min(): 15774389.0 What could be the cause of this problem?? I suspect some sort of over/underflow during fft/ifft, but I also have doubt considering the same precision on GPU and CPU (torch.complex64 for complex numbers, torch.float32 for float numbers), and no warning or exception during fft/ifft. Any advices?? python: 3.6.9 torch: 1.8.1 cuda: 11.1 (with RTX2060 and GTX1060, meaning no amp nor tf32)
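One quick check that might help narrow this down (just a suggestion, assuming the tensor fits in memory in double precision): repeat the transform in complex128, which separates ordinary float32 precision loss from a backend problem.

a64 = a.to(torch.complex128)
b64 = a64.cuda()
diff = (torch.fft.fftn(a64, dim=[-2, -1], norm="ortho")
        - torch.fft.fftn(b64, dim=[-2, -1], norm="ortho").cpu()).abs().max()
print(diff)  # should be tiny if both backends behave; a large value points at the backend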
st32356
Hi, thanks for the report. Could you open an issue on GitHub with your scripts to reproduce this, please? That might be an issue on our end or in a third-party lib we're using.
st32357
Just opened an issue (as a bug report) on GitHub: "Different torch.fft.fftn/ifftn() result from CPU and GPU" at github.com/pytorch/pytorch (opened May 21, 2021), with the reproduction scripts, saved tensors, expected CPU/GPU outputs, and environment details. Please have a look and let me know if I made any mistakes using the library.
st32358
I am using Python 1.7 In [1]: import torch ...: from torchvision.models import vgg19 ...: ...: device = torch.device("cuda:0") In [2]: In [2]: memory = torch.cuda.memory_allocated(device) Segmentation fault (core dumped) And my GPU info: ~# nvidia-smi Fri May 21 13:13:27 2021 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 418.87.01 Driver Version: 418.87.01 CUDA Version: 10.1 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | |===============================+======================+======================| | 0 Tesla V100-SXM2... On | 00000000:00:10.0 Off | 0 | | N/A 33C P0 56W / 300W | 15323MiB / 16130MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 1 Tesla V100-SXM2... On | 00000000:00:11.0 Off | 0 | | N/A 33C P0 57W / 300W | 462MiB / 16130MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 2 Tesla V100-SXM2... On | 00000000:00:12.0 Off | 0 | | N/A 39C P0 56W / 300W | 464MiB / 16130MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 3 Tesla V100-SXM2... On | 00000000:00:13.0 Off | 0 | | N/A 55C P0 283W / 300W | 15965MiB / 16130MiB | 99% Default | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: GPU Memory | | GPU PID Type Process name Usage | |=============================================================================| +-----------------------------------------------------------------------------+ However, if I check on terminal, it has no segmentation fault: # python -c 'import torch; print(torch.cuda.memory_allocated(torch.cuda.device("cuda:0")))' 0 Also, the GPU is used fully, but why this check also returns 0? # python -c 'import torch; print(torch.cuda.memory_allocated(torch.cuda.device("cuda:3")))' 0
st32359
The code you provided looks fine; I ran it just now and no error occurred. If possible, launch a new terminal and try again, or provide more detailed information about your environment, such as the torch version.
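On the second question: torch.cuda.memory_allocated() only tracks tensors allocated by the current process through PyTorch's caching allocator, so it returns 0 even when nvidia-smi shows the GPU being used by other processes. A small sketch:

import torch

device = torch.device("cuda:0")
print(torch.cuda.memory_allocated(device))  # 0: this process has not allocated any tensors yet

x = torch.randn(1024, 1024, device=device)  # roughly 4 MB of float32
print(torch.cuda.memory_allocated(device))  # now non-zero for this process only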
st32360
Pruning could also be used as a sort of neural architecture search method. Say, I have a two-layer convolutional neural network. conv1 = torch.nn.Conv2d(in_channels=3, out_channels=12) conv2 = torch.nn.Conv2d(in_channels=12, out_channels=8) If I used structured pruning and say the last three channels of the conv2 kernel was pruned to 0. Then the useful architecture of the pruned model should be conv1 = torch.nn.Conv2d(in_channels=3, out_channels=9) conv2 = torch.nn.Conv2d(in_channels=9, out_channels=8) This architecture with the remaining parameters could be saved as a new model. The size of this new model should be smaller and the inference speed of the new model should be faster than the unpruned one. I wonder if PyTorch is planning to add this interface or not.
st32361
If there isn't any work on this, maybe we could create an extended class and open a pull request?
st32362
@leimao Shouldn't the 'useful architecture' after structured pruning be:

conv1 = torch.nn.Conv2d(in_channels = 3, out_channels = 9)
conv2 = torch.nn.Conv2d(in_channels = 9, out_channels = 5)

because you are only pruning the last three channels/filters/kernel maps of the conv2 layer? The conv1 layer is untouched/unpruned.
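For reference, a sketch of what the built-in pruning utilities already cover; turning the zeroed filters into a genuinely smaller module is exactly the manual step this thread asks to automate (the kernel_size here is an arbitrary placeholder):

import torch.nn as nn
import torch.nn.utils.prune as prune

conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3)
conv2 = nn.Conv2d(in_channels=12, out_channels=8, kernel_size=3)

# zero out 3 of conv1's 12 output filters (structured pruning along dim 0, L2 norm)
prune.ln_structured(conv1, name="weight", amount=3, n=2, dim=0)

kept = conv1.weight_mask.sum(dim=(1, 2, 3)) != 0   # which output channels survived
print(int(kept.sum()))                             # 9 remaining filters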
st32363
When I tried to export my trained pytorch model to ONNX format, I encounter the error: Cannot insert a Tensor that requires grad as a constant. Consider making it a parameter or input, or detaching the gradient After searching on board, I found multiple cases that results in same error, but I didn’t find a solution suitable for my case. Here is my code: # Define a function: input raw data => preprocessing & model inference => prediction from PIL import Image # Preprocess image for PyTorch data architecture from torchvision import transforms data_preprocess = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(0.456, 0.225) ]) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') def process_for_model(img): ''' Process image array for PyTorch model img: input tensor of an image return: Tensor image prepared to input to the model ''' #img = Image.fromarray(img).convert('RGB') img = data_preprocess(img) # remember to do data preprocessing as in the training stage!! This strongly influence the testing performance img = img.to(device) img = img.view(1, 3, 889, 929) return img def pipeline(input_batch, preprocess, inference): processed_data = preprocess(input_batch) output = inference(processed_data) return output def final_pipeline(input_batch): prediction = pipeline(input_batch) return prediction class mypipeline(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_batch): input_batch = input_batch.cpu().detach().numpy().astype(np.float32) processed_data = process_for_model(input_batch) print("processed data:", processed_data) self.output = pre_model(processed_data).cpu().detach() return self.output input_shape = (889, 929, 3) dummy_input = torch.randint(0, 255, size = (889, 929, 3), device = torch.device("cuda:0")) pre_model.eval() pipeline = mypipeline() pipeline.eval() with torch.no_grad(): torch.onnx.export(pipeline, dummy_input, "txt_overlap_pip.onnx", verbose = False, ) I’ve numpy array in the workflow, which I’m not sure if it would be the cause of problem. Any advice is appreciated, thank you!
st32364
Hi, my CNN is a quite simple stack of 6 Conv-BN-ReLU blocks and it has to run on the CPU. The main speed bottleneck is the "nonzero" op at the end: it takes almost 90% of the whole processing time, so I need to optimize this op further. I found that torch uses OpenMP's or TBB's "parallel for", and that Intel and Facebook collaborated on a faster extension called IPEX (Intel Extension for PyTorch); maybe it is available out of the box. Can anybody tell me how to implement a faster NonZero op? I tried OpenVINO and ONNX Runtime. OpenVINO can optimize some ops with weights, but it doesn't support the NonZero op. ONNX Runtime is about the same speed as torch's nonzero op. Thank you!
st32365
I found that every sub-process will call initGlobalState in c10, even when they use the same underlying device. Thus we cannot use the same CUDA stream between different processes. Is there any way to share the CUDA context across many processes at the Python level, so that we can manage CUDA streams ourselves across multiple processes? Thanks.
st32366
I am facing an issue where my batch size of 16 seems to automatically change to 4 batches of 4 when running my code on 4 GPUs, and the output is not returned to me as an output of batch size 16. Here is the relevant part of my training loop, where I first print the input batch shape, then pass it to my model, and finally print the output shape. The encoder model also prints the input shape as soon as it receives the input (which is giving a mismatch while training on multiple GPUs) print("Encoder input shape: ", input_tensor.shape) encoder_output, (encoder_hidden, encoder_cell) = encoder(input_tensor) decoder_input = torch.squeeze(encoder_hidden, 0) print("Decoder input shape: ", decoder_input.shape) class Encoder(nn.Module): . . . def forward(self, context_panels): print("Input Context Panels shape: ", context_panels.shape) encoded_panels = self.panel_encoder(context_panels) print("Panel encoder output shape: ", encoded_panels.shape) output, (hidden, cell) = self.sequence_encoder(encoded_panels) return output, (hidden, cell) This is the console output I get on CPU training, which is the expected behavior: Encoder input shape: torch.Size([16, 3, 160, 160]) Input Context Panels shape: torch.Size([16, 3, 160, 160]) Panel encoder output shape: torch.Size([16, 3, 128]) Decoder input shape: torch.Size([16, 128]) However, this is what I get when I run on 4 GPUs: Encoder input shape: torch.Size([16, 3, 160, 160]) Input Context Panels shape: torch.Size([4, 3, 160, 160]) Input Context Panels shape: torch.Size([4, 3, 160, 160]) Input Context Panels shape: torch.Size([4, 3, 160, 160]) Input Context Panels shape: torch.Size([4, 3, 160, 160]) Panel encoder output shape: torch.Size([4, 3, 128]) Panel encoder output shape: torch.Size([4, 3, 128]) Panel encoder output shape: torch.Size([4, 3, 128]) Panel encoder output shape: torch.Size([4, 3, 128]) Decoder input shape: torch.Size([4, 4, 128]) Any feedback on what I am doing wrong is greatly appreciated! EDIT: So the batch size is getting resized currently for the encoder output from an LSTM, but the encoder hidden features are not being reshaped in a similar manner as I expect them to. CPU output: Encoder input shape: torch.Size([16, 3, 160, 160]) Input Context Panels shape: torch.Size([16, 3, 160, 160]) Panel encoder output shape: torch.Size([16, 3, 128]) Encoder output shape: torch.Size([16, 3, 128]) Encoder hidden shape: torch.Size([1, 16, 128]) Decoder input shape: torch.Size([16, 128]) 4 GPU output: Encoder input shape: torch.Size([16, 3, 160, 160]) Input Context Panels shape: torch.Size([4, 3, 160, 160]) Input Context Panels shape: torch.Size([4, 3, 160, 160]) Input Context Panels shape: torch.Size([4, 3, 160, 160]) Input Context Panels shape: torch.Size([4, 3, 160, 160]) Panel encoder output shape: torch.Size([4, 3, 128]) Panel encoder output shape: torch.Size([4, 3, 128]) Panel encoder output shape: torch.Size([4, 3, 128]) Panel encoder output shape: torch.Size([4, 3, 128]) Encoder output shape: torch.Size([16, 3, 128]) Encoder hidden shape: torch.Size([4, 4, 128]) Decoder input shape: torch.Size([4, 4, 128])
st32367
Right, if you are using DataParallel or DistributedDataParallel the input will automatically be split across the batch dimension for the different devices (in this case the 4 GPUs) so that is why there are 4 calls each with 1/4 the total original batch size. This is expected behavior and the output you get should be coalesced to the full batch size. Is this causing an error for you somewhere else?
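A small sketch of that behavior, plus a note that might explain the hidden-state shape (this interpretation is an assumption based on the printed shapes): by default DataParallel scatters inputs and gathers outputs along dim 0, and an LSTM hidden state is shaped (num_layers, batch, hidden), so its batch sits in dim 1 and the gathered result ends up stacked along dim 0 instead.

import torch
import torch.nn as nn

class Probe(nn.Module):
    def forward(self, x):
        print("per-device batch:", x.shape[0])   # prints 4 on each of 4 GPUs for a batch of 16
        return x * 2

model = nn.DataParallel(Probe())                 # replicates the module on all visible GPUs
inp = torch.randn(16, 3, 160, 160).cuda()
out = model(inp)
print("gathered batch:", out.shape[0])           # 16 again after gathering along dim 0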