instruction | input | output | source
---|---|---|---|
Pytorch model object has no attribute 'predict' BERT | I trained a BertClassifier model using PyTorch. After creating my best.pt, I would like to put the model into production and use it to predict and classify a new sample, so I restore it from the checkpoint. After putting it in evaluation mode and freezing it, I call .predict to run it on my sample, but I get the AttributeError below. I also initialized the model before loading the checkpoint. Where am I going wrong? Thank you for your help!
def save_ckp(state, is_best, checkpoint_path, best_model_path):
"""
function created to save checkpoint, the latest one and the best one.
This creates flexibility: either you are interested in the state of the latest checkpoint or the best checkpoint.
state: checkpoint we want to save
is_best: is this the best checkpoint; min validation loss
checkpoint_path: path to save checkpoint
best_model_path: path to save best model
"""
f_path = checkpoint_path
# save checkpoint data to the path given, checkpoint_path
torch.save(state, f_path)
# if it is a best model, min validation loss
if is_best:
best_fpath = best_model_path
# copy that checkpoint file to best path given, best_model_path
shutil.copyfile(f_path, best_fpath)
def load_ckp(checkpoint_fpath, model, optimizer):
"""
checkpoint_path: path to save checkpoint
model: model that we want to load checkpoint parameters into
optimizer: optimizer we defined in previous training
"""
# load check point
checkpoint = torch.load(checkpoint_fpath)
# initialize state_dict from checkpoint to model
model.load_state_dict(checkpoint['state_dict'])
# initialize optimizer from checkpoint to optimizer
optimizer.load_state_dict(checkpoint['optimizer'])
# initialize valid_loss_min from checkpoint to valid_loss_min
valid_loss_min = checkpoint['valid_loss_min']
# return model, optimizer, epoch value, min validation loss
return model, optimizer, checkpoint['epoch'], valid_loss_min.item()
#Create the BertClassfier class
class BertClassifier(nn.Module):
"""Bert Model for Classification Tasks."""
def __init__(self, freeze_bert=True):
"""
@param bert: a BertModel object
@param classifier: a torch.nn.Module classifier
@param freeze_bert (bool): Set `False` to fine-tune the BERT model
"""
super(BertClassifier, self).__init__()
.......
def forward(self, input_ids, attention_mask):
''' Feed input to BERT and the classifier to compute logits.
@param input_ids (torch.Tensor): an input tensor with shape (batch_size,
max_length)
@param attention_mask (torch.Tensor): a tensor that hold attention mask
information with shape (batch_size, max_length)
@return logits (torch.Tensor): an output tensor with shape (batch_size,
num_labels) '''
# Feed input to BERT
outputs = self.bert(input_ids=input_ids,
attention_mask=attention_mask)
# Extract the last hidden state of the token `[CLS]` for classification task
last_hidden_state_cls = outputs[0][:, 0, :]
# Feed input to classifier to compute logits
logits = self.classifier(last_hidden_state_cls)
return logits
def initialize_model(epochs):
""" Initialize the Bert Classifier, the optimizer and the learning rate scheduler."""
# Instantiate Bert Classifier
bert_classifier = BertClassifier(freeze_bert=False)
# Tell PyTorch to run the model on GPU
bert_classifier = bert_classifier.to(device)
# Create the optimizer
optimizer = AdamW(bert_classifier.parameters(),
lr=lr, # Default learning rate
eps=1e-8 # Default epsilon value
)
# Total number of training steps
total_steps = len(train_dataloader) * epochs
# Set up the learning rate scheduler
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=0, # Default value
num_training_steps=total_steps)
return bert_classifier, optimizer, scheduler
def train(model, train_dataloader, val_dataloader, valid_loss_min_input, checkpoint_path, best_model_path, start_epochs, epochs, evaluation=True):
"""Train the BertClassifier model."""
# Start training loop
logging.info("--Start training...\n")
# Initialize tracker for minimum validation loss
valid_loss_min = valid_loss_min_input
for epoch_i in range(start_epochs, epochs):
# =======================================
# Training
# =======================================
# Print the header of the result table
logging.info((f"{'Epoch':^7} | {'Batch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Elapsed':^9}"))
# Measure the elapsed time of each epoch
t0_epoch, t0_batch = time.time(), time.time()
# Reset tracking variables at the beginning of each epoch
total_loss, batch_loss, batch_counts = 0, 0, 0
# Put the model into the training mode
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
batch_counts +=1
# Load batch to GPU
b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)
# Zero out any previously calculated gradients
model.zero_grad()
# Perform a forward pass. This will return logits.
logits = model(b_input_ids, b_attn_mask)
# Compute loss and accumulate the loss values
loss = loss_fn(logits, b_labels)
batch_loss += loss.item()
total_loss += loss.item()
# Perform a backward pass to calculate gradients
loss.backward()
# Clip the norm of the gradients to 1.0 to prevent "exploding gradients"
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and the learning rate
optimizer.step()
scheduler.step()
# Print the loss values and time elapsed for every 20 batches
if (step % 500 == 0 and step != 0) or (step == len(train_dataloader) - 1):
# Calculate time elapsed for 20 batches
time_elapsed = time.time() - t0_batch
# Print training results
logging.info(f"{epoch_i + 1:^7} | {step:^7} | {batch_loss / batch_counts:^12.6f} | {'-':^10} | {'-':^9} | {time_elapsed:^9.2f}")
# Reset batch tracking variables
batch_loss, batch_counts = 0, 0
t0_batch = time.time()
# Calculate the average loss over the entire training data
avg_train_loss = total_loss / len(train_dataloader)
logging.info("-"*70)
# =======================================
# Evaluation
# =======================================
if evaluation == True:
# After the completion of each training epoch, measure the model's performance
# on our validation set.
val_loss, val_accuracy = evaluate(model, val_dataloader)
# Print performance over the entire training data
time_elapsed = time.time() - t0_epoch
logging.info(f"{epoch_i + 1:^7} | {'-':^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^10.6f} | {time_elapsed:^9.2f}")
logging.info("-"*70)
logging.info("\n")
# create checkpoint variable and add important data
checkpoint = {
'epoch': epoch_i + 1,
'valid_loss_min': val_loss,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
# save checkpoint
save_ckp(checkpoint, False, checkpoint_path, best_model_path)
## TODO: save the model if validation loss has decreased
if val_loss <= valid_loss_min:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min,val_loss))
# save checkpoint as best model
save_ckp(checkpoint, True, checkpoint_path, best_model_path)
valid_loss_min = val_loss
logging.info("-----------------Training complete--------------------------")
def evaluate(model, val_dataloader):
"""After the completion of each training epoch, measure the model's performance on our validation set."""
# Put the model into the evaluation mode. The dropout layers are disabled during the test time.
model.eval()
# Tracking variables
val_accuracy = []
val_loss = []
# For each batch in our validation set...
for batch in val_dataloader:
# Load batch to GPU
b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)
# Compute logits
with torch.no_grad():
logits = model(b_input_ids, b_attn_mask)
# Compute loss
loss = loss_fn(logits, b_labels)
val_loss.append(loss.item())
# Get the predictions
preds = torch.argmax(logits, dim=1).flatten()
# Calculate the accuracy rate
accuracy = (preds == b_labels).cpu().numpy().mean() * 100
val_accuracy.append(accuracy)
# Compute the average accuracy and loss over the validation set.
val_loss = np.mean(val_loss)
val_accuracy = np.mean(val_accuracy)
return val_loss, val_accuracy
bert_classifier, optimizer, scheduler = initialize_model(epochs=n_epochs)
train(model = bert_classifier ......)
bert_classifier, optimizer, scheduler = initialize_model(epochs=n_epochs)
model, optimizer, start_epoch, valid_loss_min = load_ckp(r"./best_model/best_model.pt", bert_classifier, optimizer)
model.eval()
model.freeze()
sample = {
"seq": "ABCDE",}
predictions = model.predict(sample)
AttributeError: 'BertClassifier' object has no attribute 'predict'
| Generally, someone has already written the prediction function for you.
If not, you need to handle the low-level steps yourself.
After this line, you have loaded the trained parameters:
model, optimizer, start_epoch, valid_loss_min = load_ckp(r"./best_model/best_model.pt", bert_classifier, optimizer)
After that, you need to call model.forward(input_seq, attention_mask) yourself (the attention mask may be None).
The forward method here is the one defined in the model: def forward(self, input_ids, attention_mask).
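A minimal sketch of such a wrapper (this is an addition, not part of the original answer; the preprocessing that turns a raw sample into input_ids and attention_mask tensors is assumed to be the same one used during training):
def predict(model, input_ids, attention_mask):
    model.eval()
    with torch.no_grad():
        logits = model(input_ids.to(device), attention_mask.to(device))  # calls forward()
        probs = torch.softmax(logits, dim=-1)
    return torch.argmax(probs, dim=-1), probs

# hypothetical usage with tensors of shape (batch_size, max_length):
# labels, probs = predict(model, input_ids, attention_mask)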
| https://stackoverflow.com/questions/72147225/ |
TypeError: cross_entropy_loss(): argument 'input' (position 1) must be Tensor, not Linear | I was following a YouTube video to learn how to make a chatbot. The teacher explained this step of building the training loop, and the code ran perfectly for the teacher, but I'm getting an error. What am I doing wrong?
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(device, dtype=torch.int64)
outputs= model(words)
loss = criterion(outputs,labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if(epoch +1) % 100 == 0:
print(f'epoch {epoch+1}/{epoch}, loss = {loss.item():.4f}')
print(f'epoch {epoch+1}/{epoch}, loss = {loss.item():.4f}')
NeuralNet:
class NeuralNet(nn.Module):
def __init__(self,input_size, hidden_size,num_classes):
super(NeuralNet,self).__init__()
self.l1 = nn.Linear(input_size,hidden_size)
self.l2 = nn.Linear(hidden_size,hidden_size)
self.l3 = nn.Linear(hidden_size,num_classes)
self.relu = nn.ReLU()
def forward(self,x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3
return out
| The issue is with the NeuralNet code specifically in the line:
out = self.l3
You are setting out to be the Linear layer instead of calling the linear layer on the data. Change it to
out = self.l3(out)
and it will work
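Put together, the corrected forward method of the NeuralNet then reads:
def forward(self, x):
    out = self.l1(x)
    out = self.relu(out)
    out = self.l2(out)
    out = self.relu(out)
    out = self.l3(out)  # call the layer on the data instead of returning the layer object
    return out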
| https://stackoverflow.com/questions/72153323/ |
how to set the gradient for a network in pytorch | I have a model in PyTorch. The model can take any shape, but let's assume this is the model:
torch_model = Sequential(
Flatten(),
Linear(28 * 28, 256),
Dropout(.4),
ReLU(),
BatchNorm1d(256),
ReLU(),
Linear(256, 128),
Dropout(.4),
ReLU(),
BatchNorm1d(128),
ReLU(),
Linear(128, 10),
Softmax()
)
I am using the SGD optimizer, and I want to set the gradient for each of the layers so that the SGD algorithm will move the parameters in the direction I want.
Let's say I want all the gradients for all the layers to be ones (torch.ones_like(gradient_shape)); how can I do this?
Thanks!
| In PyTorch, with a model defined as yours above, you can iterate over the layers like this:
for layer in list(torch_model.modules())[1:]:
print(layer)
You have to add the [1:] since the first module returned is the sequential module itself. In any layer, you can access the weights with layer.weight. However, it is important to remember that some layers, like Flatten and Dropout, don't have weights. A way to check, and then add 1 to each weight would be:
for layer in list(torch_model.modules())[1:]:
if hasattr(layer, 'weight'):
with torch.no_grad():
for i in range(layer.weight.shape[0]):
layer.weight[i] = layer.weight[i] + 1
I tested the above on your model and it does add 1 to every weight. Worth noting that it won't work without torch.no_grad() as you don't want pytorch tracking the changes.
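Since the question asks about setting gradients rather than weights, a related loop (an addition, not part of the original answer, assuming an SGD optimizer has been built over torch_model.parameters()) can write directly to each parameter's .grad before stepping:
for p in torch_model.parameters():
    p.grad = torch.ones_like(p)  # every gradient becomes a tensor of ones
optimizer.step()                 # SGD then moves each parameter by -lr * 1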
| https://stackoverflow.com/questions/72155950/ |
How to get weights from layer of neural network that is defined as a class object in Pytorch? | I am developing something on top of an already existing framework of code, and I am having some trouble extracting weights from a neural network defined as a class. Code below
import numpy as np
import torch
import torch.nn as nn
class Solver:
class Head(nn.Module):
def __init__(self, base):
super().__init__()
self.base = base
self.last_layer = nn.Linear(100, 10)
def forward(self, x):
x = self.base(x)
x = self.last_layer(x)
return x
def __init__(self, bases, HeadClass=None):
self.base = bases
if HeadClass:
self.head = self.Head(self.base)
else:
self.head = self.Head(self.base)
print('Head Class:',self.head)
class Full_Solver:
class Base(nn.Module):
def __init__(self):
super().__init__()
self.linear_1 = nn.Linear(1, 100)
self.linear_2 = nn.Linear(100, 100)
self.linear_3 = nn.Linear(100, 100)
def forward(self, x):
x = self.linear_1(x)
x = torch.tanh(x)
x = self.linear_2(x)
x = torch.tanh(x)
x = self.linear_3(x)
x = torch.tanh(x)
return x
def __init__(self, BaseClass=Base()):
self.base = BaseClass
print('Base model:',self.base)
print('Base model type:',type(self.base))
solver_1 = Solver(self.base)
print('Full model:',solver_1)
print('Full model type:',type(solver_1))
xx = Full_Solver()
In the Full_Solver class, I am defining a Base Neural Network, which will add a Head based on some conditions in the Solver class (I have left out all the conditions for sake of brevity). When I do print('Full model:',solver_1), the output I am getting is Full model: <__main__.Solver object at 0x7f83a82e9cd0>. How do I extract the output weights from this class object? (Assume that I just want to extract the randomly assigned weights from this)
| In this case it would be print('Full model:',solver_1.head.last_layer.weight)
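More generally, solver_1 itself is not an nn.Module, but its head attribute is, so every weight can be reached through that module. A short sketch using standard PyTorch accessors:
# walk every parameter of the head (which also contains the base network)
for name, param in solver_1.head.named_parameters():
    print(name, param.shape)

# or grab them all as a dictionary of tensors
weights = solver_1.head.state_dict()
print(weights['last_layer.weight'])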
| https://stackoverflow.com/questions/72161982/ |
How to calculate log_softmax for list of tensors without breaking autograd in Pytorch | I'm trying to calculate the log_softmax function of a list of tensors, i.e., a list [t_1, t_2, ..., t_n] where each t_i is of type torch.tensor and each t_i can be of a different, arbitrary shape. I do not want to apply the log_softmax function to each t_i separately, but to all of them as if they were part of the same unique tensor. The output of this function should be a list of tensors with the same shape as the input. Lastly, as I will apply this function to the end layer of a neural network, I want to be able to differentiate this function, i.e., the gradients must flow through it.
Pytorch provides the class torch.nn.LogSoftmax, but I cannot use it as it expects a single tensor as input, instead of a list of tensors. Additionally, I want to calculate the log_softmax function efficiently and in a stable way. To achieve that, I want to use the log-sum-exp trick. Lastly, I want to ignore the last value of the first element of the list (see code snippet below), i.e., not apply log_softmax to it.
This is my current implementation:
def log_softmax(pred_tensors):
minus_inf = -1000 # Constant that represents minus infinity
# Calculate the max value
c = max([preds.amax() if preds is not None else minus_inf for preds in pred_tensors])
# Calculate log(sum(e^(x_i-c)))
log_sum_exp = 0
for r in range(len(pred_tensors)):
if pred_tensors[r] is not None:
# Arity 0 -> ignore nullary predicate corresponding to termination condition
curr_sum = torch.sum(torch.exp(pred_tensors[r][:-1] - c)) if r == 0 else \
torch.sum(torch.exp(pred_tensors[r] - c))
log_sum_exp += curr_sum
log_sum_exp = torch.log(log_sum_exp)
# Calculate log_softmax (apply log_softmax to the original tensor) (except to the termination condition)
for r in range(len(pred_tensors)):
if pred_tensors[r] is not None:
# Arity 0 -> ignore nullary predicate corresponding to termination condition
if r == 0:
pred_tensors[r][:-1] -= log_sum_exp + c
else:
pred_tensors[r] -= log_sum_exp + c
return pred_tensors
I have tested it and it works. However, I think my implementation may be breaking the autograd of Pytorch, in lines c = max([preds.amax() if preds is not None else minus_inf for preds in pred_tensors]) and log_sum_exp += curr_sum.
So, my questions are: Is my implementation really breaking autograd? If it is, can you provide an alternative implementation that works with autograd?
| I also posted this question on the PyTorch Forum, and it was solved there. I post the solution below:
def _log_softmax(self, pred_tensors):
# Remove the nullary predicate associated with the termination condition, so that it does not
# affect the log_softmax computation
term_cond_value = pred_tensors[0][-1]
pred_tensors[0] = pred_tensors[0][:-1]
# Calculate log_sum_exp of all the values in the tensors of the list
# 1) flatten each tensor in the list
# 2) concatenate them as a unique tensor
# 3) calculate log_sum_exp
log_sum_exp = torch.logsumexp(torch.cat([preds.flatten() if preds is not None else torch.empty(0, dtype=torch.float32) for preds in pred_tensors]), dim=-1)
# Use log_sum_exp to calculate the log_softmax of the tensors in the list
for r in range(len(pred_tensors)):
if pred_tensors[r] is not None:
pred_tensors[r] -= log_sum_exp
# Append the nullary predicate corresponding to the termination condition
pred_tensors[0] = torch.cat([pred_tensors[0], term_cond_value.reshape(1)]) # We need reshape() to transform from tensor of dimension 0 to dimension 1
return pred_tensors
Basically, I firstly removed from the list of tensors the element pred_tensors[0][-1], so that it did not affect the calculations, and appended it to the final list of tensors. Then, since I could not concatenate a list of tensors of different sizes, I first flattened them and then used torch.cat to concatenate them, before using torch.logsumexp to calculate the log_sum_exp with all the values in all the tensors of the list. Then, this value was finally used to calculate the log_softmax of each tensor value, obtaining a list of output tensors with the same shape as the input.
| https://stackoverflow.com/questions/72179358/ |
Keras: Mimic PyTorch's conv2d and linear/dense weight initialization? | I am porting a model from PyTorch to Keras/Tensorflow, and I want to make sure I'm using the same algorithm for weight initialization. How do I mimic PyTorch's weight initialization in Keras?
| If you refactor the PyTorch initialization code, you'll find that the weight initialization algorithm is surprisingly simple. The comment in that code is correct; just read that comment and mimic it.
Here's working Keras / Tensorflow code that mimics it:
import tensorflow as tf
from tensorflow.keras import layers
class PytorchInitialization(tf.keras.initializers.VarianceScaling):
def __init__(self, seed=None):
super().__init__(
scale=1 / 3, mode='fan_in', distribution='uniform', seed=seed)
# Conv layer
conv = layers.Conv2D(32, 3, activation="relu", padding="SAME",
input_shape=(28, 28, 1),
kernel_initializer=PytorchInitialization(),
bias_initializer=PytorchInitialization())
# Dense / linear layer
classifier = layers.Dense(10,
kernel_initializer=PytorchInitialization(),
bias_initializer=PytorchInitialization())
| https://stackoverflow.com/questions/72185607/ |
What does it mean when your model can't overfit a small batch of data? | I am trying to train an RNN model to classify sentences into 4 classes, but it doesn't seem to work. I managed to overfit 4 examples (blue line in my plot), but even as few as 8 examples (red line) do not work, let alone the whole dataset.
I tried different learning rates and different sizes of hidden_size and embedding_size, but it doesn't seem to help. What am I missing? I know that if a model is not able to overfit a small batch, its capacity should be increased, but in this case increasing capacity has no effect.
The architecture is as follows:
class RNN(nn.Module):
def __init__(self, embedding_size=256, hidden_size=128, num_classes=4):
super().__init__()
self.embedding = nn.Embedding(len(vocab), embedding_size, 0)
self.rnn = nn.RNN(embedding_size, hidden_size, batch_first=True)
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
#x=[batch_size, sequence_length]
x = self.embedding(x) #x=[batch_size, sequence_length, embedding_size]
_, h_n = self.rnn(x) #h_n=[1, batch_size, hidden_size]
h_n = h_n.squeeze(0)
out = self.fc(h_n) #out=[batch_size, num_classes]
return out
Input data is tokenized sentences, padded with 0 to the longest sentence in the batch, so as an example one sample would be: [2784, 9544, 1321, 120, 0, 0]. The data is from AG_NEWS dataset from torchtext datasets.
The training code:
model = RNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LR)
model.train()
for epoch in range(NUM_EPOCHS):
epoch_losses = []
correct_predictions = []
for batch_idx, (labels, texts) in enumerate(train_loader):
scores = model(texts)
loss = criterion(scores, labels)
loss.backward()
optimizer.step()
optimizer.zero_grad()
epoch_losses.append(loss.item())
correct = (scores.max(1).indices==labels).sum()
correct_predictions.append(correct)
epoch_avg_loss = sum(epoch_losses)/len(epoch_losses)
epoch_avg_accuracy = float(sum(correct_predictions))/float(len(labels))
| The issue was due to vanishing gradients; it was resolved by adding batch normalization.
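A minimal sketch of one way to add it to the model above (an illustration only; the exact placement the asker used is not shown): normalize the final hidden state before the classification layer.
class RNN(nn.Module):
    def __init__(self, embedding_size=256, hidden_size=128, num_classes=4):
        super().__init__()
        self.embedding = nn.Embedding(len(vocab), embedding_size, 0)
        self.rnn = nn.RNN(embedding_size, hidden_size, batch_first=True)
        self.bn = nn.BatchNorm1d(hidden_size)  # added: normalizes the hidden state
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        x = self.embedding(x)            # [batch_size, seq_len, embedding_size]
        _, h_n = self.rnn(x)             # [1, batch_size, hidden_size]
        h_n = self.bn(h_n.squeeze(0))    # [batch_size, hidden_size]
        return self.fc(h_n)              # [batch_size, num_classes]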
| https://stackoverflow.com/questions/72186789/ |
How to find score probability of classification model result in pytorch? | I'm new to PyTorch. I've trained an image classification model, but when I test the model with an image I only get the label. If I want the probability of the prediction for that class, how can I get it?
test_image = test_image_tensor.view(1,3,300,300)
model.eval()
out = model(test_image)
ps = torch.exp(out)
topk,topclass = ps.topk(1,dim=1)
class_name = idx_to_class[topclass.cpu().numpy()[0][0]]
I'm using the above code for prediction, which gives only the class name. If I want the score of the predicted label, how can I get it?
Any help or suggestion on this will be appreciated.
| The probabilities are the softmax of the predictions:
class_prob = torch.softmax(out, dim=1)
# get most probable class and its probability:
class_prob, topclass = torch.max(class_prob, dim=1)
# get class names
class_name = idx_to_class[topclass.cpu().numpy()[0][0]]
| https://stackoverflow.com/questions/72187725/ |
nn.CrossEntropyLoss documentation example is not correct? | I tested the documentation example of nn.CrossEntropyLoss. It doesn't seem to work.
loss = nn.CrossEntropyLoss()
input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5).softmax(dim=1)
output = loss(input, target)
print(output)
error:
RuntimeError: 1D target tensor expected, multi-target not supported
| You might need to upgrade your torch version.
pip install torch --upgrade
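For reference, the probability-target form used in that documentation example only works on sufficiently recent releases (roughly PyTorch 1.10 and later); a quick check of the installed version:
import torch
print(torch.__version__)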
| https://stackoverflow.com/questions/72191924/ |
How to get intermediate output grad in Pytorch model | We can get the loss of the last layer with loss = loss_fn(y_pred, y_true), which results in a loss: Tensor.
Then we call loss.backward() to do backpropagation.
After optimizer.step() we can see the updated model.parameters().
Take the example below:
y = Model1(x) # with optimizer1
z = Model2(y) # with optimizer2
loss = loss_fn(z, z_true)
loss.backward()
optimizer2.optimize() # update Model2 parameters
# in order to update Model1 parameters I think we should do
y.backward(grad_tensor=the_output_gradient_from_Model2)
optimizer1.optimize()
How do I get the intermediate backpropagation result, e.g. the output gradient that would be passed to y_pred.backward(grad_tensor=grad)?
Update: The solution is setting requires_grad=True and taking the tensor's x.grad. Thanks for the answers.
PS: The scenario is federated learning: the model is split into 2 parts. The first part takes the input and forwards it to the second part. The second part calculates the loss and backpropagates it to the first part, so that the first part receives the gradient and does its own backward pass.
| I will assume you're referring to intermediate gradients when you say "loss of a specific layer".
You can access the gradient of the layer with respect to the output loss by accessing the grad attribute on the parameters of your model which require gradient computation.
Here is a simplistic setup:
>>> f = nn.Sequential(
nn.Linear(10,5),
nn.Linear(5,2),
nn.Linear(2, 2, bias=False),
nn.Sigmoid())
>>> x = torch.rand(3, 10).requires_grad_(True)
>>> f(x).mean().backward()
Navigate through all the parameters per layer:
>>> for n, c in f.named_children():
... for p in c.parameters():
... print(f'<{n}>:{p.grad}')
<0>:tensor([[-0.0054, -0.0034, -0.0028, -0.0058, -0.0073, -0.0066, -0.0037, -0.0044,
-0.0035, -0.0051],
[ 0.0037, 0.0023, 0.0019, 0.0040, 0.0050, 0.0045, 0.0025, 0.0030,
0.0024, 0.0035],
[-0.0016, -0.0010, -0.0008, -0.0017, -0.0022, -0.0020, -0.0011, -0.0013,
-0.0010, -0.0015],
[ 0.0095, 0.0060, 0.0049, 0.0102, 0.0129, 0.0116, 0.0066, 0.0077,
0.0063, 0.0091],
[ 0.0005, 0.0003, 0.0002, 0.0005, 0.0006, 0.0006, 0.0003, 0.0004,
0.0003, 0.0004]])
<0>:tensor([-0.0090, 0.0062, -0.0027, 0.0160, 0.0008])
<1>:tensor([[-0.0035, 0.0035, -0.0026, -0.0106, -0.0002],
[-0.0020, 0.0020, -0.0015, -0.0061, -0.0001]])
<1>:tensor([-0.0289, -0.0166])
<2>:tensor([[0.0355, 0.0420],
[0.0354, 0.0418]])
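For the split-model scenario in the question (getting the gradient at the boundary tensor y rather than at the parameters), another option, not shown above, is to call retain_grad() on the intermediate output. A small self-contained sketch with stand-in linear models:
import torch
import torch.nn as nn

model1 = nn.Linear(4, 3)
model2 = nn.Linear(3, 2)
x = torch.randn(5, 4)

y = model1(x)
y.retain_grad()          # keep the gradient of this non-leaf tensor
z = model2(y)
z.sum().backward()
print(y.grad.shape)      # gradient of the loss w.r.t. the intermediate output y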
| https://stackoverflow.com/questions/72194917/ |
Why is the actual runtime still cpu after I set cuda for tensor | I want to gpu-accelerate a custom function, and below is the original function:
import numpy as np
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
It uses the CPU to compute, but that wasn't fast enough, so I wanted to speed it up with PyTorch on the GPU and converted it into a Torch implementation:
import numpy as np
import torch
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
dets = torch.from_numpy(dets)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dets.cuda()
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = ((x2 - x1 + 1) * (y2 - y1 + 1)).cuda()
order = torch.argsort(scores)
order = torch.flip(order, dims=[0])
keep = []
while order.size()[0] > 0:
i = order[0]
keep.append(i)
xx1 = torch.maximum(x1[i], x1[order[1:]]).cuda()
yy1 = torch.maximum(y1[i], y1[order[1:]]).cuda()
xx2 = torch.minimum(x2[i], x2[order[1:]]).cuda()
yy2 = torch.minimum(y2[i], y2[order[1:]]).cuda()
w = torch.maximum(torch.tensor(0.0), xx2 - xx1 + 1).cuda()
h = torch.maximum(torch.tensor(0.0), yy2 - yy1 + 1).cuda()
inter = (w * h).cuda()
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = torch.where(ovr <= thresh)[0].cuda()
order = order[inds + 1].cuda()
return keep
But in fact, all the calculations still use the CPU, does anyone know why?
| .cuda() is not in-place. Change the dets.cuda() line in your function to dets = dets.cuda().
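In other words, every .cuda() (or .to(device)) call in the function has to be assigned back, for example:
dets = torch.from_numpy(dets)
dets = dets.cuda()  # the returned tensor must be kept; otherwise the original stays on the CPU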
| https://stackoverflow.com/questions/72198387/ |
How to apply max_length to truncate the token sequence from the left in a HuggingFace tokenizer? | In the HuggingFace tokenizer, applying the max_length argument specifies the length of the tokenized text. I believe it truncates the sequence to max_length-2 (if truncation=True) by cutting the excess tokens from the right. For the purposes of utterance classification, I need to cut the excess tokens from the left, i.e. the start of the sequence in order to preserve the last tokens. How can I do that?
from transformers import AutoTokenizer
train_texts = ['text 1', ...]
tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
encodings = tokenizer(train_texts, max_length=128, truncation=True)
| Tokenizers have a truncation_side parameter that should set exactly this.
See the docs.
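For example (a small sketch; the attribute can also be passed directly to from_pretrained):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
tokenizer.truncation_side = 'left'  # drop excess tokens from the start of the sequence
encodings = tokenizer(train_texts, max_length=128, truncation=True)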
| https://stackoverflow.com/questions/72202295/ |
Can autograd handle repeated use of the same layer in the same depth of the computation graph? | I have a network which works as follows: the input is split in half; the first half is put through some convolutional layers l1, then the second half is put through the same layers l1 (after the output for the first half has been computed); the two output representations are then concatenated and put through additional layers l2 at once. My question is similar to "Can autograd in pytorch handle a repeated use of a layer within the same module?", but the setting is not quite the same: there, the same layer was reused at different depths of the computation graph, whereas here the same layer is used twice within the same depth. Does autograd handle this properly? I.e. is the backpropagation error for l1 computed with respect to both of its forward passes, and are the weights adapted w.r.t. both of them at once?
| Autograd does not care how many times you "use" something; that is not how it works. It just builds a graph of the dependencies behind the scenes. Using something twice simply makes a graph that is not a straight line, but it does not affect execution: the gradient contributions from both uses are accumulated into the layer's parameters.
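A small self-contained check of this behaviour (an addition, not part of the original answer): the gradient of a layer used twice at the same depth accumulates contributions from both uses.
import torch
import torch.nn as nn

l1 = nn.Linear(4, 4)
x1, x2 = torch.randn(2, 4), torch.randn(2, 4)

out = torch.cat([l1(x1), l1(x2)], dim=1).sum()  # the same layer applied to both halves
out.backward()
print(l1.weight.grad)  # contains the summed gradients from both forward passes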
| https://stackoverflow.com/questions/72204054/ |
The input of the forward part of my model is a tuple, cannot be converted to onnx format? | Test Code:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(32, 16)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.fc = nn.Linear(32, 2)
def forward(self, x):
x1, x2 = x
x1 = self.linear(x1)
x1 = self.relu1(x1)
x2 = self.linear(x2)
x2 = self.relu2(x2)
out = torch.cat((x1, x2), dim=-1)
out = self.fc(out)
return out
model = Model()
model.eval()
x1 = torch.randn((2, 10, 32))
x2 = torch.randn((2, 10, 32))
x = (x1, x2)
torch.onnx.export(model,
x,
'model.onnx',
input_names=["input"],
output_names=["output"],
dynamic_axes={'input': {0: 'batch'}, 'output': {0: 'batch'}}
)
print("Done")
How can I convert the above code to ONNX? The input of the forward part of my model is a tuple, and with the approach above it cannot be converted to ONNX format. Can you tell me how to solve this?
Thanks!
| Looking at this issue and this other issue, the parameters are unpacked by default so you need to provide a tuple as argument to torch.onnx.export:
torch.onnx.export(model,
args=(x,),
f='model.onnx',
input_names=["input"],
output_names=["output"],
dynamic_axes={'input': {0: 'batch'}, 'output': {0: 'batch'}})
| https://stackoverflow.com/questions/72211330/ |
Saving the weights of a Pytorch .pth model into a .txt or .json | I am trying to save the weights of a PyTorch model into a .txt or .json. When writing it to a .txt,
#import torch
model = torch.load("model_path")
string = str(model)
with open('some_file.txt', 'w') as fp:
fp.write(string)
I get a file where not all the weights are saved, i.e. there are ellipses throughout the text file. I cannot write it to JSON since the model has tensors, which are not JSON serializable (unless there is a way that I do not know of?). How can I save the weights in the .pth file to some format such that no information is lost and they can be easily inspected?
Thanks
| When you do str(model.state_dict()), it recursively uses the str method of the elements it contains. So the problem is how the individual elements' string representations are built. You should increase the limit on how much of each tensor is printed:
torch.set_printoptions(profile="full")
See the difference with this:
import torch
import torchvision.models as models
mobilenet_v2 = models.mobilenet_v2()
torch.set_printoptions(profile="default")
print(mobilenet_v2.state_dict()['features.2.conv.0.0.weight'])
torch.set_printoptions(profile="full")
print(mobilenet_v2.state_dict()['features.2.conv.0.0.weight'])
Tensors are currently not JSON serializable.
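If a JSON file is really required, one workaround (an addition to the answer above) is to convert every tensor in the state dict to nested Python lists first, at the cost of a much larger file:
import json
import torch

model = torch.load("model_path")
state = {k: v.tolist() for k, v in model.state_dict().items()}
with open('weights.json', 'w') as fp:
    json.dump(state, fp)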
| https://stackoverflow.com/questions/72222557/ |
Send and load an ML model over Apache Kafka | I've been looking around here and on the Internet, but it seems that I'm the first one having this question.
I'd like to train an ML model (let's say something with PyTorch) and write it to an Apache Kafka cluster. On the other side, there should be the possibility of loading the model again from the received array of bytes. It seems that almost all the frameworks only offer methods to load from a path, so a file.
The only constraint I'm trying to satisfy is to not save the model as a file, so that I won't need any storage.
Am I missing something? Do you have any idea how to solve it?
| One reason to avoid this is that Kafka messages have a default of 1MB max. Therefore sending models around in topics wouldn't be the best idea, and therefore why you could instead use model files, stored in a shared filesystem, and send URIs to the files (strings) to download in the consumer clients.
For small model files, there is nothing preventing you from dumping the Kafka record bytes to a local file, but if you happen to change the model input parameters, then you'd need to edit the consumer code, anyway.
Or you can embed the models in other stream processing engines (still on local filesystems), as linked in the comments.
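If the model is small enough to fit in a single record, the state dict can also be turned into raw bytes and back entirely in memory, with no file on disk; the bytes then simply become the message payload. A sketch of that idea (the Linear model is a stand-in for the real trained model):
import io
import torch
import torch.nn as nn

model = nn.Linear(10, 2)  # stand-in for the trained PyTorch model

# producer side: serialize the weights to bytes
buffer = io.BytesIO()
torch.save(model.state_dict(), buffer)
payload = buffer.getvalue()  # bytes to put in the record value

# consumer side: rebuild the model from the received bytes
restored = nn.Linear(10, 2)
restored.load_state_dict(torch.load(io.BytesIO(payload)))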
| https://stackoverflow.com/questions/72223191/ |
How to get time taken for each layer in Pytorch? | I want to know the inference time of a layer in Alexnet. This code measures the inference time of the first fully connected layer of Alexnet as the batch size changes. And I have a few questions about this.
Is it possible to measure the inference time accurately with the following code?
Is there a time difference because the CPU and GPU run separately?
Is there a module used to measure layer inference time in Pytorch?
Given the following code:
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import time
from tqdm import tqdm
class AlexNet(nn.Module):
def __init__(self):
super(AlexNet, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.maxpool2D = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
self.adaptive_avg_polling = nn.AdaptiveAvgPool2d((6, 6))
self.dropout = nn.Dropout(p=0.5)
self.conv1 = nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2)
self.conv2 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
self.conv3 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.fc1 = nn.Linear(256 * 6 * 6, 4096)
self.fc2 = nn.Linear(4096, 4096)
self.fc3 = nn.Linear(4096, 1000)
def time(self, x):
x = self.maxpool2D(self.relu(self.conv1(x)))
x = self.maxpool2D(self.relu(self.conv2(x)))
x = self.relu(self.conv3(x))
x = self.relu(self.conv4(x))
x = self.maxpool2D(self.relu(self.conv5(x)))
x = self.adaptive_avg_polling(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
start1 = time.time()
x = self.fc1(x)
finish1 = time.time()
x = self.dropout(self.relu(x))
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return finish1 - start1
def layer_time():
use_cuda = torch.cuda.is_available()
print("use_cuda : ", use_cuda)
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
device= torch.device("cuda:0" if use_cuda else "cpu")
net = AlexNet().to(device)
test_iter = 10000
batch_size = 1
for i in range(10):
X = torch.randn(size=(batch_size, 3, 227, 227)).type(FloatTensor)
s = 0.0
for i in tqdm(range(test_iter)):
s += net.time(X)
print(s)
batch_size *= 2
layer_time()
| I found a way to measure inference time by studying the AMP document. Using this, the GPU and CPU are synchronized and the inference time can be measured accurately.
import torch, time, gc
# Timing utilities
start_time = None
def start_timer():
global start_time
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.synchronize()
start_time = time.time()
def end_timer():
torch.cuda.synchronize()
end_time = time.time()
return end_time - start_time
So my code changes as follows:
import torch, time, gc
from tqdm import tqdm
import torch.nn as nn
import torch
# Timing utilities
start_time = None
def start_timer():
global start_time
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.synchronize()
start_time = time.time()
def end_timer():
torch.cuda.synchronize()
end_time = time.time()
return end_time - start_time
class AlexNet(nn.Module):
def __init__(self):
super(AlexNet, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.maxpool2D = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
self.adaptive_avg_polling = nn.AdaptiveAvgPool2d((6, 6))
self.dropout = nn.Dropout(p=0.5)
self.conv1 = nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2)
self.conv2 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
self.conv3 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.fc1 = nn.Linear(256 * 6 * 6, 4096)
self.fc2 = nn.Linear(4096, 4096)
self.fc3 = nn.Linear(4096, 1000)
def time(self, x):
x = self.maxpool2D(self.relu(self.conv1(x)))
x = self.maxpool2D(self.relu(self.conv2(x)))
x = self.relu(self.conv3(x))
x = self.relu(self.conv4(x))
x = self.maxpool2D(self.relu(self.conv5(x)))
x = self.adaptive_avg_polling(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
# Check first linear layer inference time
start_timer()
x = self.fc1(x)
result = end_timer()
x = self.dropout(self.relu(x))
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return result
def layer_time():
use_cuda = torch.cuda.is_available()
print("use_cuda : ", use_cuda)
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
device= torch.device("cuda:0" if use_cuda else "cpu")
net = AlexNet().to(device)
test_iter = 1000
batch_size = 1
for i in range(10):
X = torch.randn(size=(batch_size, 3, 227, 227)).type(FloatTensor)
s = 0.0
for i in tqdm(range(test_iter)):
s += net.time(X)
print(s)
batch_size *= 2
layer_time()
| https://stackoverflow.com/questions/72224866/ |
Assigning custom weights to embedding layer in PyTorch | Does PyTorch's nn.Embedding support manually setting the embedding weights for only specific values?
I know I could set the weights of the entire embedding layer like this -
emb_layer = nn.Embedding(num_embeddings, embedding_dim)
emb_layer.weights = torch.nn.Parameter(torch.from_numpy(weight_matrix))
But does PyTorch provide any succinct/efficient method to set the embedding weights for only one particular value?
Something like emb_layer.set_weight(5) = torch.tensor([...]) to manually set the embedding only for the value "5"?
| Yes. You can run emb_layer.weight.shape to see the shape of the weights, and then you can access and change a single weight like this, for example:
with torch.no_grad():
emb_layer.weight[idx_1,idx_2] = some_value
I use two indices here since the embedding layer is two dimensional. Some layers, like a Linear layer, would only require one index.
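For the specific case in the question, overwriting the embedding vector of a single index such as 5, the whole row can be assigned in one go, for example:
import torch
import torch.nn as nn

emb_layer = nn.Embedding(num_embeddings=10, embedding_dim=4)
with torch.no_grad():
    emb_layer.weight[5] = torch.tensor([0.1, 0.2, 0.3, 0.4])  # new embedding for index 5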
| https://stackoverflow.com/questions/72230580/ |
Is this the correct way of computing the average accuracy? | I am fairly new to coding and am getting confused between average accuracy and overall accuracy. I have created a function to calculate accuracy; I then divide its result by len(dataloader) at the end of each epoch. Is this the correct way to calculate average accuracy? If not, could someone explain how I go about doing this correctly?
def accuracy(predictions, labels):
classes = torch.argmax(predictions, dim=1)
return torch.mean((classes == labels).float())
def train(model, optimizer, dataloader):
#Setting model to train mode
model.train()
acc = 0.0
loss = 0.0
loss_fc = nn.CrossEntropyLoss()
for i, (img, label) in enumerate(dataloader):
#source images and labels to cpu device
img, label = img.to(device), label.to(device)
y_pred = model(img)
optimizer.zero_grad()
loss = loss_fc(y_pred, label)
loss.backward()
optimizer.step()
#Update loss and accuracy
loss += loss.item()
acc += accuracy(y_pred, label)
loss /= len(dataloader)
acc /= len(dataloader)
| Not sure what you mean by the overall and average accuracy. Typically accuracy is calculated at the end of each epoch. You pass the accuracy function your predictions and your actual labels and it returns what proportion you got right as a decimal (0-1).
I haven't seen any use for calculating the average accuracy across every epoch during training as this metric would be heavily impacted by how fast your model learns rather than how well it is able to eventually perform e.g. a model that needs a lot of epochs to do well will probably appear worse on this average accuracy than one that can converge on fewer epochs.
If you take a look at the accuracy score metric from scikit-learn it should help clear things up for you.
Link:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html
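If you do want a single number per epoch computed over all samples (rather than an average of per-batch accuracies, which only matches exactly when every batch has the same size), a small sketch using the names from the question is:
correct, total = 0, 0
for img, label in dataloader:
    preds = torch.argmax(model(img.to(device)), dim=1)
    correct += (preds == label.to(device)).sum().item()
    total += label.size(0)
epoch_accuracy = correct / total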
Hope this helps!
| https://stackoverflow.com/questions/72231947/ |
Colab crashes when trying to create confusion matrix | I am trying to create a confusion matrix for my test set. My test set consists of 3585 images. Whenever I try to run the following code:
x_test, y_test = next(iter(dataloader))
y_pred = resnet(x_test)
Google colab crashes using all the available RAM. Does anyone have a work around for this? Should I do this in batches?
|
Should I do this in batches?
Yes! Try to reduce batch size.
dataloader = ... # reduce batch size here on dataloader creation
...
y_pred = []
y_true = []
with torch.no_grad():                        # no gradients needed for evaluation
    for x_batch, y_batch in dataloader:      # the loader yields (images, labels) pairs
        y_pred.append(resnet(x_batch))
        y_true.append(y_batch)
I use lists with append; you can try another way.
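Building on the lists collected above (an addition, assuming scikit-learn is available), the actual confusion matrix can then be computed on the concatenated results:
from sklearn.metrics import confusion_matrix

all_preds = torch.cat(y_pred).argmax(dim=1).cpu().numpy()
all_labels = torch.cat(y_true).cpu().numpy()
print(confusion_matrix(all_labels, all_preds))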
| https://stackoverflow.com/questions/72236286/ |
Import error: 'SimpleExperiment' while running BOTORCH example code | I am trying to use Bayesian optimisation for my numerical model runs, optimising their parameters. For this I am using BoTorch. Its example code is given as follows:
#!/usr/bin/env python3
# coding: utf-8
# ## Using a custom botorch model with Ax
#
# In this tutorial, we illustrate how to use a custom BoTorch model within Ax's `SimpleExperiment` API. This allows us to harness the convenience of Ax for running Bayesian Optimization loops, while at the same time maintaining full flexibility in terms of the modeling.
#
# Acquisition functions and strategies for optimizing acquisitions can be swapped out in much the same fashion. See for example the tutorial for [Implementing a custom acquisition function](./custom_acquisition).
#
# If you want to do something non-standard, or would like to have full insight into every aspect of the implementation, please see [this tutorial](./closed_loop_botorch_only) for how to write your own full optimization loop in BoTorch.
# ### Implementing the custom model
#
# For this tutorial, we implement a very simple gpytorch Exact GP Model that uses an RBF kernel (with ARD) and infers a (homoskedastic) noise level.
#
# Model definition is straightforward - here we implement a gpytorch `ExactGP` that also inherits from `GPyTorchModel` -- this adds all the api calls that botorch expects in its various modules.
#
# *Note:* botorch also allows implementing other custom models as long as they follow the minimal `Model` API. For more information, please see the [Model Documentation](../docs/models).
# In[1]:
from botorch.models.gpytorch import GPyTorchModel
from gpytorch.distributions import MultivariateNormal
from gpytorch.means import ConstantMean
from gpytorch.models import ExactGP
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.priors import GammaPrior
class SimpleCustomGP(ExactGP, GPyTorchModel):
_num_outputs = 1 # to inform GPyTorchModel API
def __init__(self, train_X, train_Y):
# squeeze output dim before passing train_Y to ExactGP
super().__init__(train_X, train_Y.squeeze(-1), GaussianLikelihood())
self.mean_module = ConstantMean()
self.covar_module = ScaleKernel(
base_kernel=RBFKernel(ard_num_dims=train_X.shape[-1]),
)
self.to(train_X) # make sure we're on the right device/dtype
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
# #### Define a factory function to be used with Ax's BotorchModel
#
# Ax's `BotorchModel` internally breaks down the different components of Bayesian Optimization (model generation & fitting, defining acquisition functions, and optimizing them) into a functional api.
#
# Depending on which of these components we want to modify, we can pass in an associated custom factory function to the `BotorchModel` constructor. In order to use a custom model, we have to implement a model factory function that, given data according to Ax's api specification, instantiates and fits a BoTorch Model object.
#
# The call signature of this factory function is the following:
#
# ```python
# def get_and_fit_gpytorch_model(
# Xs: List[Tensor],
# Ys: List[Tensor],
# Yvars: List[Tensor],
# state_dict: Optional[Dict[str, Tensor]] = None,
# **kwargs: Any,
# ) -> Model:
# ```
#
# where
# - the `i`-th element of `Xs` are the training features for the i-th outcome as an `n_i x d` tensor (in our simple example, we only have one outcome)
# - similarly, the `i`-th element of `Ys` and `Yvars` are the observations and associated observation variances for the `i`-th outcome as `n_i x 1` tensors
# - `state_dict` is an optional PyTorch module state dict that can be used to initialize the model's parameters to pre-specified values
#
# The function must return a botorch `Model` object. What happens inside the function is up to you.
#
# Using botorch's `fit_gpytorch_model` utility function, model-fitting is straightforward for this simple model (you may have to use your own custom model fitting loop when working with more complex models - see the tutorial for [Fitting a model with torch.optim](fit_model_with_torch_optimizer).
# In[2]:
from botorch.fit import fit_gpytorch_model
def _get_and_fit_simple_custom_gp(Xs, Ys, **kwargs):
model = SimpleCustomGP(Xs[0], Ys[0])
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
return model
# ### Set up the optimization problem in Ax
# Ax's `SimpleExperiment` API requires an evaluation function that is able to compute all the metrics required in the experiment. This function needs to accept a set of parameter values as a dictionary. It should produce a dictionary of metric names to tuples of mean and standard error for those metrics.
#
# For this tutorial, we use the Branin function, a simple synthetic benchmark function in two dimensions. In an actual application, this could be arbitrarily complicated - e.g. this function could run some costly simulation, conduct some A/B tests, or kick off some ML model training job with the given parameters).
# In[3]:
import random
import numpy as np
def branin(parameterization, *args):
x1, x2 = parameterization["x1"], parameterization["x2"]
y = (x2 - 5.1 / (4 * np.pi ** 2) * x1 ** 2 + 5 * x1 / np.pi - 6) ** 2
y += 10 * (1 - 1 / (8 * np.pi)) * np.cos(x1) + 10
# let's add some synthetic observation noise
y += random.normalvariate(0, 0.1)
return {"branin": (y, 0.0)}
# We need to define a search space for our experiment that defines the parameters and the set of feasible values.
# In[4]:
from ax import ParameterType, RangeParameter, SearchSpace
search_space = SearchSpace(
parameters=[
RangeParameter(
name="x1", parameter_type=ParameterType.FLOAT, lower=-5, upper=10
),
RangeParameter(
name="x2", parameter_type=ParameterType.FLOAT, lower=0, upper=15
),
]
)
# Third, we make a `SimpleExperiment` β note that the `objective_name` needs to be one of the metric names returned by the evaluation function.
# In[5]:
from ax import SimpleExperiment
exp = SimpleExperiment(
name="test_branin",
search_space=search_space,
evaluation_function=branin,
objective_name="branin",
minimize=True,
)
# We use the Sobol generator to create 5 (quasi-) random initial point in the search space. Calling `batch_trial` will cause Ax to evaluate the underlying `branin` function at the generated points, and automatically keep track of the results.
# In[6]:
from ax.modelbridge import get_sobol
sobol = get_sobol(exp.search_space)
exp.new_batch_trial(generator_run=sobol.gen(5))
# To run our custom botorch model inside the Ax optimization loop, we can use the `get_botorch` factory function from `ax.modelbridge.factory`. Any keyword arguments given to this function are passed through to the `BotorchModel` constructor. To use our custom model, we just need to pass our newly minted `_get_and_fit_simple_custom_gp` function to `get_botorch` using the `model_constructor` argument.
#
# **Note:** `get_botorch` by default automatically applies a number of parameter transformations (e.g. to normalize input data or standardize output data). This is typically what you want for standard use cases with continuous parameters. If your model expects raw parameters, make sure to pass in `transforms=[]` to avoid any transformations to take place. See the [Ax documentation](https://ax.dev/docs/models.html#transforms) for additional information on how transformations in Ax work.
# #### Run the optimization loop
#
# We're ready to run the Bayesian Optimization loop.
# In[7]:
from ax.modelbridge.factory import get_botorch
for i in range(5):
print(f"Running optimization batch {i+1}/5...")
model = get_botorch(
experiment=exp,
data=exp.eval(),
search_space=exp.search_space,
model_constructor=_get_and_fit_simple_custom_gp,
)
batch = exp.new_trial(generator_run=model.gen(1))
print("Done!")
I have installed BoTorch as :
conda install botorch -c pytorch -c gpytorch
Upon doing :
python3 custom_botorch_model_in_ax.py
I am getting the error message :
Traceback (most recent call last):
File "/Users/sayantanmandal/Downloads/custom_botorch_model_in_ax.py", line 136, in <module>
from ax import SimpleExperiment
ImportError: cannot import name 'SimpleExperiment' from 'ax' (/Users/sayantanmandal/opt/miniconda3/lib/python3.9/site-packages/ax/__init__.py)
There is some issue with the SimpleExperiment submodule of the ax module.
Installing ax via conda or pip install ax does not solve this issue. What steps should I take from here? Your help is greatly appreciated. Thanks.
| Looks like SimpleExperiment was dropped starting with 0.2.4. An issue response recommends switching to Experiment. Otherwise, roll back to 0.2.3.
If this example was found in version-controlled documentation, it would be nice if you notified the maintainers that the example needs updating.
Note that on Conda and PyPI, the ax module is delivered through the ax-platform package.
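For example, pinning the older release that still ships SimpleExperiment (version number taken from the answer above):
pip install ax-platform==0.2.3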
| https://stackoverflow.com/questions/72238545/ |
pytorch gather failed with sparse_grad=True | Even with a very simple example, backward() does not work if sparse_grad=True; please see the error below.
Is this error expected, or am I using gather in the wrong way?
In [1]: import torch as th
In [2]: x = th.rand((3,3), requires_grad=True)
# sparse_grad = False, the backward could work as expetecd
In [3]: th.gather(x @ x, 1, th.LongTensor([[0], [1]]), sparse_grad=False).sum().backward()
# sparse_grad = True, backward CANNOT work
In [4]: th.gather(x @ x, 1, th.LongTensor([[0], [1]]), sparse_grad=True).sum().backward()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
----> 1 th.gather(x @ x, 1, th.LongTensor([[0], [1]]), sparse_grad=True).sum().backward()
~/miniconda3/lib/python3.9/site-packages/torch/_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
305 create_graph=create_graph,
306 inputs=inputs)
--> 307 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
308
309 def register_hook(self, hook):
~/miniconda3/lib/python3.9/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
152 retain_graph = create_graph
153
--> 154 Variable._execution_engine.run_backward(
155 tensors, grad_tensors_, retain_graph, create_graph, inputs,
156 allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
RuntimeError: sparse tensors do not have strides
| I think torch.gather does not support sparse operators:
torch.gather(x, 1, torch.LongTensor([[0], [1]]).to_sparse())
Results with:
NotImplementedError: Could not run 'aten::gather.out' with arguments from the 'SparseCPU' backend.
I think you should open an issue or a feature request on pytorch's github.
| https://stackoverflow.com/questions/72239086/ |
How to iterate through all parameters in a neural network using pytorch | I have the following simple fully-connected neural network:
class Neural_net(nn.Module):
def __init__(self):
super(Neural_net, self).__init__()
self.fc1 = nn.Linear(2, 2)
self.fc2 = nn.Linear(2, 1)
self.fc_out = nn.Linear(1, 1)
def forward(self, x,train = True):
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
x = self.fc_out(x)
return x
net = Neural_net()
How can I loop through all the parameters of the network and check if for example they are greater than a certain value? I am using pytorch and if I do:
for n,p in net.named_parameters():
if p > value:
...
I get an error since p is not a single number, but rather a tensor of either weights or biases for each layer.
My goal is to check if a criterion is satisfied for each of the parameters and flag them e.g. with 1 if it is or 0 if it is not, storing it in a dictionary with the same structure as net.parameters(). Yet, I am having trouble figuring out how to loop through them.
I thought about creating a parameter vector:
param_vec = torch.cat([p.view(-1) for p in net.parameters()])
and then accessing the parameter values and checking them would be easy,but then I can't think of a way to go back to the dictionary form to flag them.
Thank you for any help!
| First I would define the criterion as an operation on a tensor. In your case, this could look like this:
cond = lambda tensor: tensor.gt(value)
Then you just need to apply it to each tensor in net.parameters(). To keep it with the same structure, you can do it with dict comprehension:
cond_parameters = {n: cond(p) for n,p in net.named_parameters()}
Let's see it in practice!
net = Neural_net()
print(dict(net.named_parameters()))
#> {'fc1.weight': Parameter containing:
#> tensor([[-0.4767, 0.0771],
#> [ 0.2874, 0.5474]], requires_grad=True),
#> 'fc1.bias': Parameter containing:
#> tensor([ 0.0405, -0.1997], requires_grad=True),
#> 'fc2.weight': Parameter containing:
#> tensor([[0.5400, 0.3241]], requires_grad=True),
#> 'fc2.bias': Parameter containing:
#> tensor([-0.5306], requires_grad=True),
#> 'fc_out.weight': Parameter containing:
#> tensor([[-0.9706]], requires_grad=True),
#> 'fc_out.bias': Parameter containing:
#> tensor([-0.4174], requires_grad=True)}
Let's set value to zero and get the dict of parameters:
value = 0
cond = lambda tensor: tensor.gt(value)
cond_parameters = {n: cond(p) for n,p in net.named_parameters()}
#>{'fc1.weight': tensor([[False, True],
#> [ True, True]]),
#> 'fc1.bias': tensor([ True, False]),
#> 'fc2.weight': tensor([[True, True]]),
#> 'fc2.bias': tensor([False]),
#> 'fc_out.weight': tensor([[False]]),
#> 'fc_out.bias': tensor([False])}
| https://stackoverflow.com/questions/72247846/ |
Mismatching target size in criterion | I'm trying to use PyTorch on the IMDB dataset to predict positive and negative reviews. When I get to the training stage, the following error is raised by the criterion function:
ValueError: Target size (torch.Size([64])) must be the same as input size (torch.Size([1136, 64, 1]))
After some research, I saw that the error is because the output of the model is returning a tensor of size [1136, 64, 1], and criterion is expecting only batch results.
Howerver, I don't know how to solve this error.
My code:
import torch
import spacy
import torch.nn as nn
from torchtext.legacy import data
import sys
import csv
import torch.optim as optim
import re
import nltk
from nltk.corpus import stopwords
from torchtext import vocab
from torchtext.legacy.data import Field
from torchtext.legacy import datasets
import pandas as pd
import re
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import random
SEED = 1234
torch.manual_seed(SEED) # For reproducibility
torch.backends.cudnn.deterministic = True
import torch.nn.functional as F
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
super().__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim)
self.hidden_fc = nn.Linear(embedding_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, text):
#text = [sent len, batch size]
embedded = self.embedding(text)
h_1 = F.relu(self.hidden_fc(embedded))
# assert torch.equal(output[-1,:,:], h_1.squeeze(0))
# [batch size, output dim]
return self.fc(h_1.squeeze(0))
def binary_accuracy(preds, y):
"""
Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
"""
#round predictions to the closest integer
rounded_preds = torch.round(torch.sigmoid(preds)) # 0.75 --> 1 0.4 --> 0
correct = (rounded_preds == y).float() #convert into float for division
acc = correct.sum() / len(correct)
return acc
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
model.train() #Train mode is on
for batch in iterator:
optimizer.zero_grad() #Reset the gradients
predictions = model(batch.text) ## forward propagation
print(predictions.shape)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
loss.backward() ## backward propagation / calculate gradients
optimizer.step() ## update parameters
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
model.eval() #Evaluation mode is on
with torch.no_grad():
for batch in iterator:
predictions = model(batch.text).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
TEXT = data.Field(tokenize = 'spacy',
tokenizer_language = 'en_core_web_sm',
lower = True)
LABEL = data.LabelField(dtype = torch.float)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL) ## IMDB reviews dataset
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
MAX_VOCAB_SIZE = 25_000
TEXT.build_vocab(train_data, max_size = MAX_VOCAB_SIZE) #Build the vocabulary using the top frequent 25K words
LABEL.build_vocab(train_data)
BATCH_SIZE = 64
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE)
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
model = MLP(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
import torch.optim as optim
optimizer = optim.SGD(model.parameters(), lr=1e-3)
criterion = nn.BCEWithLogitsLoss()
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
| To summarize your problem, you have reviews you want to classify as positive or negative. To do so you train an embedding space to map each word to a vector, then output a probability for each sentence, supervised with the corresponding label using a binary cross-entropy loss, nn.BCEWithLogitsLoss.
Your current model is comprised of:
nn.Embedding: embeds each word in the sequence independently thus converting the input tensor shape from (seq_len, batch_size) to (seq_len, batch_size, embedding_dim). Where seq_len is the number of tokens in your input sequence.
nn.Linear layer reduces the dimensionality by projecting the features, the tensor shape is converted from (seq_len, batch_size, embedding_dim) to (seq_len, batch_size, hidden_dim).
A non-linearity layer is applied to the sequence of word vectors. Note how the structure of the sentence is retained. And finally, apply a second linear layer to map from (seq_len, batch_size, hidden_dim) to (seq_len, batch_size, output_dim). Still with the sentence structure (cf. the dim=0 with seq_len).
This is the reason why you are getting (1136, 64, 1) as the predictions shape: 1136 must be your sequence length, 64 is BATCH_SIZE, while 1 is OUTPUT_DIM.
Yet you are trying to classify each sequence as a whole; what you would need instead is a single tensor or scalar value per sentence, i.e. a shape of (1, 64, 1). This implies reducing the first dimension, which corresponds to the sequence dimension, to a single value.
A straightforward and easy way to reduce the dimension such that you can represent the whole sentence with a single vector is by applying an average pool to the sentence. The average vector of the words in each sentence should give you the sentiment of the positiveness/negativeness of the overall sentence. You can apply this operator before the final projection to remain in a relatively high dimension, either with nn.AdaptiveAvgPool1d with an output size of 1 or simply torch.Tensor.mean.
Here is a possible implementation with nn.AdaptiveAvgPool1d:
class MLP(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
super().__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim)
self.hidden_fc = nn.Linear(embedding_dim, hidden_dim)
self.avg = nn.AdaptiveAvgPool1d(1) # reduces (N, C, L_in) to (N, C, 1)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, text):
# (seq_len, batch_size) = (1136, 64)
embedded = self.embedding(text)
# (seq_len, batch_size, embedding_dim) = (1136, 64, 100)
h_1 = F.relu(self.hidden_fc(embedded))
# (seq_len, batch_size, hidden_dim) = (1136, 64, 256)
avg = self.avg(h_1.permute(1,2,0))
# (batch_size, hidden_dim, 1) = (64, 256, 1)
out = self.fc(avg.squeeze(-1))
# (batch_size, output_dim) = (64, 1)
return out
Or with torch.Tensor.mean:
class MLP(nn.Module):
def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
super().__init__()
self.embedding = nn.Embedding(input_dim, embedding_dim)
self.hidden_fc = nn.Linear(embedding_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, text):
# (seq_len, batch_size) = (1136, 64)
embedded = self.embedding(text)
# (seq_len, batch_size, embedding_dim) = (1136, 64, 100)
h_1 = F.relu(self.hidden_fc(embedded))
# (seq_len, batch_size, hidden_dim) = (1136, 64, 256)
avg = h_1.mean(0)
# (batch_size, hidden_dim) = (64, 256)
out = self.fc(avg)
# (batch_size, output_dim) = (64, 1)
return out
Alternative methods involve using more sophisticated neural network layers such as recurrent neural network blocks (nn.RNN, nn.LSTM, nn.GRU)...
| https://stackoverflow.com/questions/72249637/ |
Understanding an Einsum usage for graph convolution | I am reading the code for the spatial-temporal graph convolution operation here:
https://github.com/yysijie/st-gcn/blob/master/net/utils/tgcn.py and I'm having some difficulty understanding what is happening with an einsum operation. In particular
For x a tensor of shape (N, kernel_size, kc//kernel_size, t, v), where
kernel_size is typically 3, and lets say kc=64*kernel_size, t is the number of frames, say 64, and v the number of vertices, say 25. N is the batch size.
Now for a tensor A of shape (3, 25, 25) where each dimension is a filtering op on the graph vertices, an einsum is computed as:
x = torch.einsum('nkctv,kvw->nctw', (x, A))
I'm not sure how to interpret this expression. What I think it's saying is that for each batch element, for each channel c_i out of 64, sum each of the three matrices obtained by matrix multiplication of the (64, 25) feature map at that channel with the value of A[i]. Do I have this correct? The expression is a bit of a mouthful, and notation wise there seems to be a bit of a strange usage of kc as one variable name, but then decomposition of k as kernel size and c as the number of channels (192//3 = 64) in the expression for the einsum. Any insights appreciated.
| Helps when you look closely at the notation:
nkctv for left side
kvw on the right side
nctw being the result
Missing from the result are:
k
v
These elements are summed together into a single value and squeezed, leaving us the resulting shape.
Something along the lines of (expanded shapes (added 1s) are broadcasted and sum per element):
left: (n, k, c, t, v, 1)
right: (1, k, 1, 1, v, w)
Now it goes (l, r for left and right):
prod = torch.mul(l, r)
torch.sum(prod, dim=(1, 4))
squeeze any singular dimensions
It is pretty hard to get; hence Einstein's summation notation helps in terms of thinking about how the resulting shapes are "mixed" with each other, at least for me.
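As a quick check, here is a small sketch that spells out the same broadcasted multiply-and-sum explicitly (tensor names follow the question):
# x: (n, k, c, t, v), A: (k, v, w)
expanded = x[:, :, :, :, :, None] * A[None, :, None, None, :, :]   # broadcast to (n, k, c, t, v, w)
out = expanded.sum(dim=(1, 4))                                      # sum over k and v -> (n, c, t, w)
print(torch.allclose(out, torch.einsum('nkctv,kvw->nctw', x, A)))   # True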
| https://stackoverflow.com/questions/72252168/ |
How to judge whether a torch.tensor dtype is int or not? | I want to check whether a Tensor converted from an image is normalized or not, i.e. whether the dtype is int or float. Is there a convenient way to achieve this goal? I do not want an enumerated condition check like a.dtype == torch.int or a.dtype == torch.int32 or a.dtype == torch.uint8 .... Or is there another way to check whether an image tensor is normalized or not?
| As pointed out by jasonharper, you can use torch.is_floating_point to check your tensor:
if torch.is_floating_point(my_tensor):
# the tensor is "normalized"...
else:
# the tensor is an integer and needs to be normalized...
| https://stackoverflow.com/questions/72253473/ |
How to resize image tensors | The following is my code where I'm converting every image to PIL and then turning them into Pytorch tensors:
transform = transforms.Compose([transforms.PILToTensor()])
# choose the training and test datasets
train_data = os.listdir('data/training/')
testing_data = os.listdir('data/testing/')
train_tensors = []
test_tensors = []
for train_image in train_data:
img = Image.open('data/training/' + train_image)
train_tensors.append(transform(img))
for test_image in testing_data:
img = Image.open('data/testing/' + test_image)
test_tensors.append(transform(img))
# Print out some stats about the training and test data
print('Train data, number of images: ', len(train_data))
print('Test data, number of images: ', len(testing_data))
batch_size = 20
train_loader = DataLoader(train_tensors, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_tensors, batch_size=batch_size, shuffle=True)
# specify the image classes
classes = ['checked', 'unchecked', 'other']
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
However, I am getting this error:
RuntimeError: stack expects each tensor to be equal size, but got [4, 66, 268] at entry 0 and [4, 88, 160] at entry 1
This is because my images are not resized prior to PIL -> Tensor. What is the correct way of resizing data images?
| Try to utilize ImageFolder from torchvision; assuming the images have different sizes, you can use CenterCrop or RandomResizedCrop depending on your task. Check the full list.
Here is an example:
train_dir = "data/training/"
train_dataset = datasets.ImageFolder(
train_dir,
transforms.Compose([
transforms.RandomResizedCrop(img_size), # image size int or tuple
# Add more transforms here
transforms.ToTensor(), # convert to tensor at the end
]))
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
| https://stackoverflow.com/questions/72257550/ |
Selecting entries of a pytorch tensor with another tensor | I have a tensor a with float entries and torch.Size([64,2]) and I also have a tensor b with torch.Size([64]). The entries of b are only 0 or 1.
I would like to get a new tensor c with torch.Size([64]) such that c[i] == a[i,b[i]] for every index i. How can I do that?
My attempt
I tried with torch.gather but without success. The following code gives me RuntimeError: Index tensor must have the same number of dimensions as input tensor
import torch
a = torch.zeros([64,2])
b = torch.ones(64).long()
torch.gather(input=a, dim=1,index=b)
Any help will be highly appreciated!
| You can perform this straight with an indexing of a on both dimensions:
On dimension=0: a "sequential" indexing using torch.arange.
On dimension=1: indexing using b.
All in all, this gives:
>>> a[torch.arange(len(a)), b]
Alternatively you can use torch.gather, the operation you are looking for is:
# c[i] == a[i,b[i]]
The provided gather operation when applied on dim=1 provides something like:
# c[i,j] == a[i,b[i,j]]
As you can see, we need to account for the difference in shapes between between a and b. To do so, you can unsqueeze a singleton dimension on b (annotated with the letter j above) such than #b=(64, 1), for instance with b.unsqueeze(-1) or b[...,None]:
>>> a.gather(dim=1, index=b[...,None]).flatten()
| https://stackoverflow.com/questions/72262957/ |
FileNotFoundError in module easyocr when running exe file | I am trying to run an exe file and scan a picture with easyocr, but the following error occurs. Could someone help me, please?
Traceback (most recent call last):
File "threading.py", line 954, in _bootstrap_inner
File "threading.py", line 892, in run
File "ZhongDon.py", line 26, in ocr_simp
File "easyocr\easyocr.py", line 200, in __init__
File "easyocr\easyocr.py", line 261, in setLanguageList
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\Idensas\\AppData\\Local\\Temp\\_MEI44842\\easyocr\\character\\ch_sim_char.txt'
There are also warnings, but I ignored them
[4484] WARNING: file already exists but should not: C:\Users\Idensas\AppData\Local\Temp\_MEI44842\torch\_C.cp39-win_amd64.pyd
torch\_jit_internal.py:750: UserWarning: Unable to retrieve source for @torch.jit._overload function: <function _DenseLayer.forward at 0x0000015A402A1D30>.
warnings.warn(f"Unable to retrieve source for @torch.jit._overload function: {func}.")
torch\_jit_internal.py:750: UserWarning: Unable to retrieve source for @torch.jit._overload function: <function _DenseLayer.forward at 0x0000015A402D8040>.
warnings.warn(f"Unable to retrieve source for @torch.jit._overload function: {func}.")
| This has solved my problem
pyinstaller -F ZhongDon.py --collect-all easyocr
Found the solution here
| https://stackoverflow.com/questions/72265507/ |
Converting images to Pytorch tensors loses label data | I have a dataset of images. In the below code, I am trying to convert them to Pytorch tensors by first converting them to PIL images:
# choose the training and test datasets
train_data = os.listdir('data/training/')
testing_data = os.listdir('data/testing/')
train_tensors = []
test_tensors = []
# Print out some stats about the training and test data
print('Train data, number of images: ', len(train_data))
print('Test data, number of images: ', len(testing_data))
# The transformation call to resize images and transform them into Tensors
transform = transforms.Compose([
transforms.RandomResizedCrop((120,120)),
transforms.PILToTensor()
])
# Converting every train/test image to a PIL image and then to a Pytorch tensor
for train_image in train_data:
img = Image.open('data/training/' + train_image)
train_tensors.append(transform(img))
for test_image in testing_data:
img = Image.open('data/testing/' + test_image)
test_tensors.append(transform(img))
However, in this process, the labels are completely lost. This is the output of train_tensors
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255]],
[[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
...,
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255]],
[[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 254, 254],
...,
[254, 254, 255, ..., 254, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255]],
[[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
...,
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255],
[255, 255, 255, ..., 255, 255, 255]]], dtype=torch.uint8)
When I use this tensor in a dataloader and try to extract the labels, I get a too many values to unpack error.
train_loader = DataLoader(train_tensors, batch_size=batch_size, shuffle=True)
dataiter = iter(train_loader)
images, labels = dataiter.__next__() #
How can I maintain my label data?
| You appended only the transformed images to train_tensors, with no labels.
Therefore, dataiter.__next__() will give you only the tensor you appended, while you are unpacking it into 2 variables, images and labels.
Appending the label information to the list (or requesting only one variable) will solve this error.
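For example, a minimal sketch, assuming each filename encodes its class (the classes list and the filename convention below are assumptions for illustration; adapt the label lookup to however your labels are actually stored):
# hypothetical: assumes filenames look like "checked_001.png"
classes = ['checked', 'unchecked', 'other']
train_tensors = []
for train_image in train_data:
    img = Image.open('data/training/' + train_image)
    label = classes.index(train_image.split('_')[0])    # derive the label for this image
    train_tensors.append((transform(img), label))       # store (image tensor, label) pairs
train_loader = DataLoader(train_tensors, batch_size=batch_size, shuffle=True)
images, labels = next(iter(train_loader))                # unpacking now works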
| https://stackoverflow.com/questions/72267363/ |
What is the alternative of pytorch module.register_parameter(name, param) in tensorflow? | I'm trying to convert some pytorch code to tensorflow. In the pytorch code they are adding an extra parameter to every module of a model using module.register_parameter(name, param). How can I convert this part of the code to tensorflow?
Sample code on below:
for module_name, module in self.model.named_modules():
module.register_parameter(name, new_parameter)
| tf.Variable is the equivalent of nn.Parameter in PyTorch. tf.Variable is mainly used to store model parameters since their values are constantly updated during training.
To use a tensor as a new model parameter, you need to convert it to tf.Variable. You can check here how to create variables from tensors.
If you want to add a model parameter in TensorFlow inside the model itself, you could simply create a variable inside the model class and it will be automatically registered as a model parameter by TensorFlow.
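For instance, a minimal sketch of the in-model approach (the layer and variable names here are illustrative): assigning a tf.Variable as an attribute of a tf.keras.Model or tf.keras.layers.Layer is enough for TensorFlow to track it as a trainable weight.
import tensorflow as tf

class MyModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(4)
        # assigning a tf.Variable as an attribute registers it as a trainable weight
        self.scale = tf.Variable(1.0, trainable=True, name='scale')
    def call(self, x):
        return self.dense(x) * self.scale

model = MyModel()
_ = model(tf.zeros((1, 8)))                          # build the model by calling it once
print([v.name for v in model.trainable_variables])   # includes the extra 'scale' variable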
If you want to add a tf.Variable externally to a model as a model parameter, you could manually add it to the trainable_weights attribute of tf.keras.layers.Layer by extending it like this -
model.layers[-1].trainable_weights.extend([new_parameter])
| https://stackoverflow.com/questions/72267884/ |
What is the difference between torchvision.models.resnet and torch.hub.load? | There are two methods for using resnet in pytorch.
methods 1:
import torch
model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet50', pretrained=True)
model.eval()
methods 2:
import torch
net = models.resnet50(pretrained=True)
Do they load the same model? If not, what is the difference?
| Loaded that way, there is effectively no difference between your two models: both calls resolve to the same resnet50 architecture with the same pretrained weights. A difference in the number of layers (and therefore in behaviour) would only appear if you loaded different variants, e.g. resnet18 one way and resnet50 the other; you can see more about how the variants differ in this paper.
Torch Hub also lets you publish pretrained models in your repository, but since you're loading it from 'pytorch/vision:v0.10.0' (which is the same repository from which Models is loading the neural networks), there should be no difference between:
model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True)
and
model = models.resnet18(pretrained=True)
| https://stackoverflow.com/questions/72268344/ |
Externally add a Variable to a model in TensorFlow | I have an existing TensorFlow model, and I want to add a new "parameter" (a tf.Variable) to the model's list of parameters (such that it's trainable) and add it externally to the model's list of parameters / computational graph.
One approach that I tried, is to append the new parameters to the model's list of trainable weights, something like this (here new_parameter is a tf.Variable) -
model.layers[-1].trainable_weights.extend([new_parameter])
model.compile(....)
But I'm not sure if that's the best way to go about it. In PyTorch, we have nn.Parameter instead of tf.Variable, and we have register_parameter using which we can register tensors as new parameter's to the model's list of parameters. Is there any equivalent of register_parameter in TensorFlow? Or some other way to achieve the same goal?
| It is also possible with a custom dense layer.
[ Sample ]:
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import matplotlib.pyplot as plt
import numpy as np
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
for file in files.take(15):
image = tf.io.read_file( file )
image = tfio.experimental.image.decode_tiff(image, index=0)
list_file_actual.append(image)
image = tf.image.resize(image, [32,32], method='nearest')
list_file.append(image)
list_label.append(1)
for file in files_2.take(18):
image = tf.io.read_file( file )
image = tfio.experimental.image.decode_tiff(image, index=0)
list_file_actual.append(image)
image = tf.image.resize(image, [32,32], method='nearest')
list_file.append(image)
list_label.append(9)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
os.mkdir(checkpoint_dir)
print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class / Function
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MyDenseLayer(tf.keras.layers.Layer):
def __init__(self, num_outputs, num_add):
super(MyDenseLayer, self).__init__()
self.num_outputs = num_outputs
self.num_add = num_add
def build(self, input_shape):
self.kernel = self.add_weight("kernel",
shape=[int(input_shape[-1]),
self.num_outputs])
def call(self, inputs):
temp = tf.add( inputs, self.num_add )
temp = tf.matmul(temp, self.kernel)
return temp
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
list_file = tf.cast( list_file, dtype=tf.int64 )
list_file = tf.constant( list_file, shape=(33, 1, 32, 32, 4), dtype=tf.int64)
list_label = tf.cast( list_label, dtype=tf.int64 )
list_label = tf.constant( list_label, shape=(33, 1, 1, 1), dtype=tf.int64)
dataset = tf.data.Dataset.from_tensor_slices(( list_file, list_label ))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
])
layer = MyDenseLayer(10, 5)
model.add(layer)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(192, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
model.load_weights(checkpoint_path)
print("model load: " + checkpoint_path)
input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=50 )
model.save_weights(checkpoint_path)
plt.plot(history.history['loss'])
plt.show()
plt.close()
input("...")
[ Output ]:
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
normalization (Normalizatio (None, 32, 32, 4) 0
n)
normalization_1 (Normalizat (None, 32, 32, 4) 0
ion)
conv2d (Conv2D) (None, 30, 30, 32) 1184
max_pooling2d (MaxPooling2D (None, 15, 15, 32) 0
)
dense (Dense) (None, 15, 15, 128) 4224
reshape (Reshape) (None, 128, 225) 0
bidirectional (Bidirectiona (None, 128, 192) 247296
l)
bidirectional_1 (Bidirectio (None, 192) 221952
nal)
my_dense_layer (MyDenseLaye (None, 10) 1920
r) *** custom layer added
flatten (Flatten) (None, 10) 0
dense_1 (Dense) (None, 192) 2112
dense_2 (Dense) (None, 10) 1930
=================================================================
Total params: 480,618
Trainable params: 480,618
Non-trainable params: 0
_________________________________________________________________
| https://stackoverflow.com/questions/72268859/ |
An error while trying to implement a polynomial regression with Pytorch - Gradients are None after loss.backward() | I am trying to implement a custom polynomial regression using PyTorch but during the training procedure my implementation fails to calculate the gradients; i.e. the weights are always None even after the loss.backward() command. Below I give all the necessary details.
Step 1 I generate some data points with the following commands:
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch.autograd import Function
import torch.nn as nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
seed_value = 42
np.random.seed(seed_value)
x = np.sort(np.random.rand(1000))
y = np.cos(1.2 * x * np.pi) + (0.1 * np.random.randn(1000))
and then I use train-test split from sklearn to split my data into training and test sets.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x,y,train_size = 0.7,
random_state = seed_value)
Step 2 I create the custom function named poly which returns the value of the polynomial p(x)=w0+w1x+...w5x^5, evaluated at x for given weights w.
def poly(x,w,batch_size = 10,degree = 5):
x = x.repeat(1,degree+1)
w = w.repeat(batch_size,1)
exp = torch.arange(0.,degree+1).repeat(batch_size,1)
return torch.sum(w*torch.pow(x,exp),dim=1)
Step 3 I construct the class custom_dataset which inherits from PyTorch's dataset to handle my training into batches.
class custom_dataset(Dataset):
def __init__(self,X,y):
self.x = torch.from_numpy(X).type(torch.float32).reshape(len(X),1)
self.y = torch.from_numpy(y).type(torch.float32)
def __len__(self):
return len(self.x)
def __getitem__(self,idx):
return self.x[idx], self.y[idx]
Step 4 I construct the loop handling the training procedure.
training_data = custom_dataset(X_train,y_train)
test_data = custom_dataset(X_test,y_test)
def training_loop(train_loader, w, epochs, lr, batch_size,
loss_fn = nn.MSELoss(), degree = 5):
weights = torch.tensor(w,dtype = torch.float32, requires_grad = True)
num_batches = len(train_loader)//batch_size
for epoch in range(1,epochs+1):
print(f"{5*'-'}>epoch:{epoch}<{5*'-'}")
for i,sample in enumerate(train_loader):
x,y = sample
y_preds = poly(x,weights,batch_size = batch_size)
loss = loss_fn(y,y_preds)
loss.backward() # backpropagation
weights = weights - lr*weights.grad # update - gradient descent
if (i+1) % 100 == 0:
print(f"- Batch:[{i+1}|{num_batches}]{5*' '}Samples:[{(i+1)*num_batches}|{len(train_loader)}]{5*' '}loss:{loss.item():.6f}")
return w
Step 5 I start training...
epochs = 10
lr = 1e-3
batch_size = 10
degree = 5
train_loader = DataLoader(training_data, batch_size = batch_size,
shuffle = True)
test_loader = DataLoader(test_data, batch_size = batch_size,
shuffle = True)
w = [0]*(degree+1)
w = training_loop(train_loader, w = w, epochs = 30, lr = lr,
batch_size = batch_size)
and getting the following error
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) Input In [40], in <cell line: 10>()
7 test_loader = DataLoader(test_data, batch_size = batch_size,
8 shuffle = True)
9 w = [0]*(degree+1)
---> 10 w = training_loop(train_loader, w = w, epochs = 30, lr = lr,
11 batch_size = batch_size)
Input In [39], in training_loop(train_loader, w, epochs, lr, batch_size, loss_fn, degree)
10 loss = loss_fn(y,y_preds)
11 loss.backward() # backpropagation
---> 12 weights = weights - lr*weights.grad # update - gradient descent
14 if (i+1) % 100 == 0:
15 print(f"- Batch:[{i+1}|{num_batches}{5*' '}Samples:[{(i+1)*num_batches}|{len(train_loader)}]{5*' '}loss:{loss.item():.6f}")
TypeError: unsupported operand type(s) for *: 'float' and 'NoneType'
This means that the computation of the gradients did not affect the variable weights, as weights.grad is still None. Do you have any idea what is wrong?
| You are overwriting the weights variable on your first loop iteration: the assignment replaces it with a new, non-leaf copy that has no grad attribute. This behavior can be reproduced with the following minimal code:
>>> weights.grad = torch.ones_like(weights)
>>> for i in range(2):
... print(weights.grad)
... weights = weights - weights.grad
tensor([1., 1.])
None
To fix this, you can update the parameter using an in-place operation:
weights -= lr*weights.grad # update - gradient descent
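As a practical sketch, note that an in-place update of a leaf tensor that requires grad should be wrapped in torch.no_grad(), and the accumulated gradient should be reset between batches:
loss.backward()                       # compute gradients
with torch.no_grad():                 # do not record the update itself
    weights -= lr * weights.grad      # in-place update keeps `weights` as the same leaf tensor
    weights.grad.zero_()              # reset accumulated gradients before the next batch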
| https://stackoverflow.com/questions/72270801/ |
MSELoss from Pytorch | I am trying to train a neural network using Pytorch. I would like the loss function to be the MSE. I tried to use torch.nn.MSELoss, however I get an error that I do not understand.
For example the following code gives me RuntimeError: Boolean value of Tensor with more than one value is ambiguous
import torch
import torch.nn as nn
model = torch.zeros(64)
model.requires_grad = True
target = torch.ones(64)
loss = nn.MSELoss(model, target)
Any help will be very much appreciated!
| Please look in Pytorch docs: https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html
You need to create an MSELoss object before calling with the target and predictions.
loss = nn.MSELoss()
input = torch.zeros(64, requires_grad=True)
target = torch.ones(64)
output = loss(input, target)
| https://stackoverflow.com/questions/72274562/ |
Is this custom PyTorch loss function differentiable | I have a custom forward implementation for a PyTorch loss. The training works well. I've checked the loss.grad_fn and it is not None.
I'm trying to understand two things:
How this function can be differentiable since there is an if-else statement on the path from input to output?
Does the path from gt (ground truth input) to loss (output) need to be differentiable? or only the path from pred (prediction input)?
Here is the source code:
class FocalLoss(nn.Module):
def __init__(self):
super(FocalLoss, self).__init__()
def forward(self, pred, gt):
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss_s = pos_loss.sum()
neg_loss_s = neg_loss.sum()
if num_pos == 0:
loss = - neg_loss_s
else:
loss = - (pos_loss_s + neg_loss_s) / num_pos
return loss
| The if statement is not part of the computational graph. It is part of the code used to build this graph dynamically (i.e. the forward function) but it isn't in itself part of it. The principle to follow is to ask yourself whether you backtrack to the leaves of the graph (tensors that do not have parents in the graph, i.e. inputs, and parameters) using grad_fn callbacks of each node, backpropagating through the graph. The answer is you can only do so if each of the operators is differentiable: in programming terms, they implement a backward function operation (a.k.a. grad_fn).
In your example, whether num_pos is equal to 0 or not, the resulting loss tensor will depend on neg_loss_s alone or on pos_loss_s and neg_loss_s. However in either cases, the resulting loss tensor remains attached to the input pred:
via one way: the "neg_loss_s" node
or the other: the "pos_loss_s" and "neg_loss_s" nodes.
In your setup, either way, the operation is differentiable.
If gt is a ground-truth tensor then it doesn't require gradient, and the operations from it to the final loss don't need to be differentiable. This is the case in your example, where both pos_inds and neg_inds are non-differentiable because they come from boolean operators.
| https://stackoverflow.com/questions/72284627/ |
Understanding broadcasting and arithmetic operations on different dimension tensors | I'm currently working on computing various similarity metrics between vectors such as cosine similarity, euclidean distance, mahalanobis distance, etc. As I'm working with vectors that can be very large, I need compute time to be minimal.
I'm struggling to understand how to work with vectors of different dimensions (they, do, however, share one dimension) and how to work with this in PyTorch.
I have two vectors, A and B with dimensions [867, 768] and [621, 768], respectively.
I am trying to compute the following:
For each v_a of the 867 vectors in A,
Subtract v_a - v_b for each of the 621 vectors in B
I'm aware that this is achieveable under the hood with the likes of scipy and numpy but I'm trying to avoid detaching and moving the vectors to the CPU for speed.
Can someone help me understand the logic of the operators required in PyTorch to achieve this?
| You could use fancy indexing on both input tensors to unsqueeze a dimension such that A and B have a shape of (1, 867, 768) and (621, 1, 768) respectively. The subtraction operation will then automatically broadcast the two tensors to identical shapes.
>>> diff = A[None]-B[:,None]
>>> diff.shape
torch.Size([621, 867, 768])
This is the typical approach when implementing batched-pairwise distances.
More specifically, notice the difference between the two variants: A[None]-B[:,None] and A[:,None]-B[None].
diff = A[None]-B[:,None] # (1, k_a, b) - (k_b, 1, b) -> (k_b, k_a, b) - (k_b, k_a, b)
diff.shape # (k_b, k_a, b)
Compared to:
diff = A[:,None]-B[None] # (k_a, 1, b) - (1, k_b, b) -> (k_a, k_b, b) - (k_a, k_b, b)
diff.shape # (k_a, k_b, b)
You can read more about broadcasting on the NumPy documentation page.
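For example, once you have diff, batched Euclidean distances follow directly (a small illustration):
# squared differences summed over the feature dimension, then square-rooted
dist = diff.pow(2).sum(dim=-1).sqrt()   # shape (621, 867): one distance per (v_b, v_a) pair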
| https://stackoverflow.com/questions/72287059/ |
Deploying Pytorch only for prediction | I've trained my model locally and now I want to use it in my Kubernetes cluster. Unfortunately, all the Docker images for Pytorch are 5+ GBs because they contain the scripts for training which I won't need now. I've created my own image which is only 3.5 GBs but still huge. Is there a slim Pytorch version for predictions? If not, which parts of the package can I safely remove and how?
| No easy answer for the Python version of PyTorch unfortunately (or at least none I'm aware of).
Python, in general, is not well-suited for Docker deployments as it carries over the dependencies (even if you don't need all of their functionality, imports are often at the top of the file making your aforementioned removal infeasible for projects of PyTorch size and complexity).
There is a way out though...
torchscript
Given your trained model you can convert it to traced/scripted version (see here). After you manage that:
Inference in other languages
Write your inference code in another language, either Java or C++ (see here for more info).
I have only used C++, but you might get there easier with Java, I think.
Results
Managed to get PyTorch for CPU inference down to roughly ~32MB; GPU would weigh more and be way more complex though, and would probably need ~1GB for the CUDNN dependency alone.
C++ way
Please note the torchlambda project is not currently maintained and I'm the creator; hopefully it gives you some tips at least.
See:
Dockerfile for the image build
CMake used for building
Docs for more info about compilation options etc.
C++ inference code
Additional notes:
It also uses AWS SDKs and you would have to remove them from at least these files
You don't need static compilation: it helps reach the smallest image size I could come up with, but it is not strictly necessary (skipping it adds roughly 100MB or so).
Final
Try Java first, as its packaging is probably saner (although the final image would probably be a little bigger)
The C++ way is not tested with the newest PyTorch version and might be subject to change with basically any release
In general it takes A LOT of time and debugging, unfortunately.
| https://stackoverflow.com/questions/72288419/ |
Concatenate torch tensors | I have two tensors in PyTorch as:
a.shape, b.shape
# (torch.Size([512, 28, 2]), torch.Size([512, 28, 26]))
My goal is to join/merge/concatenate them together so that I get the shape: (512, 28, 28).
I tried:
torch.stack((a, b), dim = 2).shape
torch.cat((a, b)).shape
But none of them seem to work.
I am using PyTorch version: 1.11.0 and Python 3.9.
Help?
| Set dim parameter to 2 to concatenate over last dimension:
a = torch.randn(512, 28, 2)
b = torch.randn(512, 28, 26)
print(a.size(), b.size())
# set dim=2 to concat over 2nd dimension
c = torch.cat((a, b), dim=2)
print(c.size())
torch.Size([512, 28, 2]) torch.Size([512, 28, 26])
torch.Size([512, 28, 28])
| https://stackoverflow.com/questions/72291199/ |
Is it possible to auto-size the subsequent input of a layer following torch.nn.Flatten within torch.nn.Sequential in PyTorch? | If I have the following model class for example:
class MyTestModel(nn.Module):
def __init__(self):
super(MyTestModel, self).__init__()
self.seq1 = nn.Sequential(
nn.Conv2d(3, 6, 3),
nn.MaxPool2d(2, 2),
nn.Conv2d(6, 16, 3),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(myflattendinput(), 120), # how to automate this?
nn.ReLU(),
nn.Linear(120, 84),
nn.ReLU(),
nn.Linear(84, 2),
)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = self.seq1(x)
x = self.softmax(x)
return x
I know, normally you would let the data loader give a fixed size input to the model, thus having a fixed size for the input of the layer after nn.Flatten(), however I was wondering if you could somehow compute this automatically?
| PyTorch (>=1.8) has LazyLinear which infers the input dimension.
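For example, the sequential block from the question could be sketched as below; note that the lazy layer only resolves its in_features on the first forward pass, so run one dummy batch through the model before inspecting parameter shapes or building the optimizer:
self.seq1 = nn.Sequential(
    nn.Conv2d(3, 6, 3),
    nn.MaxPool2d(2, 2),
    nn.Conv2d(6, 16, 3),
    nn.MaxPool2d(2, 2),
    nn.Flatten(),
    nn.LazyLinear(120),   # in_features is inferred from the flattened input
    nn.ReLU(),
    nn.Linear(120, 84),
    nn.ReLU(),
    nn.Linear(84, 2),
)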
| https://stackoverflow.com/questions/72291704/ |
Tensor shape for multivariable LSTM on Pytorch | I have a dataset with 8 features and 4 timesteps. I am trying to implement an LSTM but need help understanding if I have set my tensor correctly. The aim is to take the outputted features from the LSTM and pass them through a NN.
My tensor shape is currently #samples x #timesteps x #features i.e. 4500x4x8. This works with the code below. I want to make sure that the model is indeed taking each timestep matrix as a new sequence (with matrix 4500x[0]x8 being the first timestep matrix and 4500x[3]x8 being the last timestep). I then take the final timestep output (output[:,-1,:] to feed through a NN.
Is the code doing what I think it is doing? I ask as performance is marginally less than a simple RF that only uses the final timestep data. This would be unexpected as the data has strong time-series correlations (it tracks patients' vitals declining before going on ventilation).
I have the following code:
class LSTM1(nn.Module):
def __init__(self, num_classes, input_size, hidden_size, num_layers):
super(LSTM1, self).__init__()
self.num_classes = num_classes #number of classes
self.num_layers = num_layers #number of layers
self.input_size = input_size #input size
self.hidden_size = hidden_size #hidden state
self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, batch_first=True) #lstm
self.fc_1 = nn.Linear(hidden_size, 32) #fully connected 1
self.fc_2 = nn.Linear(32, 12) #fully connected 1
self.fc_3 = nn.Linear(12, 1)
self.fc = nn.Sigmoid() #fully connected last layer
self.relu = nn.ReLU()
def forward(self,x):
h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)) #hidden state
c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)) #internal state
# Propagate input through LSTM
output, (hn, cn) = self.lstm(x, (h_0, c_0)) #lstm with input, hidden, and internal state
out = output[:,-1,:] #reshaping the data for Dense layer next
out = self.relu(out)
out = self.fc_1(out) #first Dense
out = self.relu(out) #relu
out = self.fc_2(out) #2nd dense
out = self.relu(out) #relu
out = self.fc_3(out) #3rd dense
out = self.relu(out) #relu
out = self.fc(out) #Final Output
return out
| Error
Your error stems from the last three lines.
Do not use ReLU activation at the end of your network
Use nn.Linear -> nn.Sigmoid with BCELoss or
nn.Linear with nn.BCEWithLogitsLoss (see here for what logits are).
What is going on
With ReLU you output values in the range [0, +inf)
Applying sigmoid on top of it "squashes" those values into (0, 1), with an input of 0 mapping to the 0.5 threshold (e.g. a ReLU output of 0 becomes probability 0.5, hence 1 after thresholding at 0.5!)
In effect, you always predict 1 with this code, which is not what you want probably
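For instance, the end of forward would become (a sketch of only the final lines, keeping everything else unchanged):
out = self.fc_2(out)   # 2nd dense
out = self.relu(out)
out = self.fc_3(out)   # final linear layer: no ReLU after this point
out = self.fc(out)     # sigmoid output, to be paired with nn.BCELoss
return out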
| https://stackoverflow.com/questions/72295207/ |
Integer partition N into M parts in parallel | I am trying to randomly generate integer partitions (N into M parts) in pytorch with a minimum partition size of 1.
For example, (3, 1, 1) and (4, 1, 0) are both partitions of 5 into 3 parts but (4, 1, 0)'s minimum partition size is 0 so this should not be allowed
I would like to use this to generate my dataset on demand, so would nice if there was a pytorch (parrallel/gpgpu) solution.
See other questions about generating integer partitions:
Generate restricted weak integer compositions (or partitions) of an integer n into k parts in Python
Elegant Python code for Integer Partitioning
| Here's a solution that works in limited cases: M has to divide N, 2 has to divide M, and the maximum part size is bounded, but this is the behaviour I wanted.
You start off with the equal partition, then calculate a delta that sums to zero...
M = 4
N = 16
MINIMUM = 1
assert N % M == 0
assert M % 2 == 0
avg = N // M
equal_partition = torch.full((M,), avg)
half_delta = torch.randint(-(avg-MINIMUM), avg-MINIMUM, (M // 2,))
delta = torch.cat((half_delta, -half_delta), dim=-1)[torch.randperm(M)]
partition = equal_partition + delta
print(partition, partition.sum())
| https://stackoverflow.com/questions/72297157/ |
Failed to use transforms.ToTensor and transforms.Normalize to normalize the MNIST dataset | I used the following code to normalize the MNIST dataset, but when I print the first sample, it fails to normalize, as the max element is 255, not 1.
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
train_set = torchvision.datasets.MNIST(
root=data_dir, train=True, download=True, transform=train_transform)
When I check the range of the dataset input images:
print("min:%f max:%f" %(train_set.data.min(), train_set.data.max()))
output result:min:0.000000 max:255.000000
I was expecting [0, 1] instead, I don't know why that is. Is there something wrong?
| The reason why you have a range of [0,255] is that you are accessing the underlying data of the dataset via the data attribute. This means the transforms have not been applied yet to the data.
>>> train_transform = T.Compose([T.ToTensor()])
>>> train_set = torchvision.datasets.MNIST(
root='.', train=True, download=True, transform=train_transform)
Your access of the data:
>>> f'min:{train_set.data.min()} max:{train_set.data.max()}'
min:0.000000 max:255.000000
You have to access the dataset by its proper interface in order for the transform pipeline to take effect. To make sure, you could unroll the entire dataset's inputs into a tensor and look at its range:
>>> x, y = zip(*train_set)
>>> x_ = torch.stack(x)
>>> f'min:{x_.min()} max:{x_.max()}'
min:tensor(0.) max:tensor(1.)
| https://stackoverflow.com/questions/72297488/ |
Why is my output predicting the same label using pretrained Alexnet in pytorch? | I'm attempting to use a pretrained alexnet model for CIFAR10 dataset however it always predicts everything as the same class. I use the exact same code except using alexnet untrained and it works as intended. Why is it doing this?
Here is my code:
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
net = models.alexnet(pretrained=True).to(device)
transform = transforms.Compose(
[transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=2, )
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=True, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.005, momentum=0.9)
for epoch in range(3): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(device),labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
running_loss = 0.0
print('Finished Training')
After the code I print the accuracy of each class. And it predicts every image as a plane.
| Pytorch AlexNet was trained on ImageNet, so its classifier has 1000 output classes.
CIFAR10 is a 10-class dataset.
You should create a new classifier before training with CIFAR10.
I found this post by Dr. Vaibhav Kumar that should explain how to do so.
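For example, a minimal sketch of replacing the head before the training loop (torchvision's AlexNet keeps its final layer at classifier[6]):
net = models.alexnet(pretrained=True)
net.classifier[6] = nn.Linear(4096, 10)   # 10 CIFAR10 classes instead of the 1000 ImageNet classes
net = net.to(device)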
| https://stackoverflow.com/questions/72297922/ |
DataLoader returns multiple values sequentially instead of a list or tuple | def __init__():
def __len__():
def __getitem__(self, idx):
cat_cols = (self.cat_cols.values.astype(np.float32))
cont_cols = (self.cont_cols.values.astype(np.float32))
label = (self.label.astype(np.int32))
return (cont_cols[idx], cat_cols[idx], label[idx])
When I used the dataloader in the above class, I get the cont_cols, cat_cols and label as outputs with index 0, 1 and 2. Whereas I want them together. I have tried returning values as dictionary but then I have indexing issues.
I have to read the output of dataloader as
dl = DataLoader(dataset[0], batch_size = 1)
for i, data in enumerate(dl):
if i == 0:
cont = data
if i == 1:
cat = data
if i == 2:
label = data
Currently my output for
for i, data in enumerate(dl):
print(i, data)
is
0 tensor([[3.2800e+02, 4.8000e+01, 1.0000e+03, 1.4069e+03, 4.6613e+05, 5.3300e+04,
0.0000e+00, 5.0000e+00, 1.0000e+00, 1.0000e+00, 2.0000e+00, 7.1610e+04,
6.5100e+03, 1.3020e+04, 5.2080e+04, 2.0040e+03]])
1 tensor([[ 2., 1., 1., 4., 2., 17., 0., 2., 3., 0., 4., 4., 1., 2.,
2., 10., 1.]])
2 tensor([1], dtype=torch.int32)
What I want is the output to be accessed by data[0], data[1] and data[2] but the dataloader gives me back only data[0]. It is returning the cont_cols first, then cat_cols and then label.
| I think you got confused here: your dataset can indeed return tuples, but you have to handle it differently.
Your dataset is defined as:
class MyDataset(Dataset):
def __init__(self):
pass
def __len__(self):
pass
def __getitem__(self, idx):
cat_cols = (self.cat_cols.values.astype(np.float32))
cont_cols = (self.cont_cols.values.astype(np.float32))
label = (self.label.astype(np.int32))
return (cont_cols[idx], cat_cols[idx], label[idx])
Then you define your dataset and data loader. Note, you should not provide dataset[0] here, but instead dataset:
>>> dataset = Dataset()
>>> dl = DataLoader(dataset, batch_size=1)
Then access your dataloader content in a loop:
>>> for cont, cat, label in dl:
... print(cont, cat, label)
| https://stackoverflow.com/questions/72300479/ |
Find PyTorch model parameters that don't contribute to loss | In PyTorch (v1.10) Distibuted DataParallel, unused parameters in a model that don't contribute to the final loss can raise a RuntimeError (as mentioned in this other question, this PyTorch forums thread).
"RuntimeError: Expected to have finished reduction in the prior iteration before starting a new one. This error indicates that your module has parameters that were not used in producing loss. You can enable unused parameter detection by passing the keyword argument find_unused_parameters=True to torch.nn.parallel.DistributedDataParallel, and by making sure all forward function outputs participate in calculating loss."
Although it's possible to inspect which parameters are affected at error-time (as mentioned above, or setting env var TORCH_DISTRIBUTED_DEBUG="INFO"), it seems like there should be a way to statically inspect a model to locate (and presumably prune or disable gradient on) parameters that aren't contributing to the current loss objective?
So given a torch.nn.Module-based model whose forward() function returns some loss tensor (maybe alongside others) - How can we programmatically, before starting to train, find all parameters (including nested modules) that aren't contributing to loss?
| By default, PyTorch tensors that are the result of some computation record their history, that is their ancestors. This is needed for the backward pass to compute the gradient.
We can make use of this to find all tensors that contribute to some new tensors by just going through the whole history.
Note that this works for a static network that always has the same architecture. As soon as you have conditionals that e.g. depend on some intermediate value this won't work, and I claim in that case it is impossible to find what tensors are involved in advance. (It's similar to the halting problem.)
import torch
import torch.nn as nn
# Example of a simple network
class Net(nn.Module):
def __init__(self):
super().__init__()
self.x = nn.Parameter(torch.tensor([999999.0])) # not contributing
self.layers = nn.ModuleList([nn.Sequential(nn.Linear(1, 4), nn.Linear(4, 1)) for _ in range(3)])
def forward(self, x):
for m in self.layers: x = m(x) + x
return x
net = Net()
x = torch.ones((1, 1))
# compute the forward pass to create the computation graph
y = net(x)
# use computation graph to find all contributing tensors
def get_contributing_params(y, top_level=True):
nf = y.grad_fn.next_functions if top_level else y.next_functions
for f, _ in nf:
try:
yield f.variable
except AttributeError:
pass # node has no tensor
if f is not None:
yield from get_contributing_params(f, top_level=False)
contributing_parameters = set(get_contributing_params(y))
all_parameters = set(net.parameters())
non_contributing = all_parameters - contributing_parameters
print(non_contributing) # returns the [999999.0] tensor
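Once identified, one option (a sketch) is to freeze the non-contributing parameters so they no longer require gradients, e.g. before wrapping the model in DistributedDataParallel:
for p in non_contributing:
    p.requires_grad_(False)   # exclude from gradient computation and reduction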
| https://stackoverflow.com/questions/72301628/ |
Should I normalize or weight losses when combining them in pytorch? | network architecture
I have a neural network with 3 heads, one of them with a focal loss and two others with L1 losses. They are combined by summing: loss = hm_loss + off_loss + wh_loss
However the range of typical values for loss elements are different. Is it an issue? Should I weight the loss elements, or should I normalize the network outputs?
| This is a typical challenge when performing multi-task learning. There are many methods to handle this, but as for all things in this field, there is no single solution to solve them all. The most straightforward approach is to weigh the different loss components indeed. You can do so by performing a grid search or random search on the three weights or try and level the three components of your loss by looking at the orders of magnitude for each of them. The general idea behind this is if you're giving high precedence for one of the loss terms, then the gradient corresponding to this term will be much more prominent when performing back propagation and parameter update.
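For example, a minimal sketch of the weighted sum (the weight values below are placeholders; pick them via a search or by matching the typical magnitudes of each term):
w_hm, w_off, w_wh = 1.0, 1.0, 0.1   # illustrative weights
loss = w_hm * hm_loss + w_off * off_loss + w_wh * wh_loss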
I recommend you read more on multi-task learning. For example you could start with Multi-Task Learning for Dense Prediction Tasks A Survey: Simon Vandenhende et al., in TPAMI'21.
| https://stackoverflow.com/questions/72302269/ |
Efficiently insert alternate rows and columns in Python | I need to alternate Pytorch Tensors (similar to numpy arrays) with rows and columns of zeros. Like this:
Input => [[ 1,2,3],
[ 4,5,6],
[ 7,8,9]]
output => [[ 1,0,2,0,3],
[ 0,0,0,0,0],
[ 4,0,5,0,6],
[ 0,0,0,0,0],
[ 7,0,8,0,9]]
I am using the accepted answer in this question that proposes the following
def insert_zeros(a, N=1):
# a : Input array
# N : number of zeros to be inserted between consecutive rows and cols
out = np.zeros( (N+1)*np.array(a.shape)-N,dtype=a.dtype)
out[::N+1,::N+1] = a
return out
The answers works perfectly, except that I need to perform this many times on many arrays and the time it takes has become the bottleneck. It is the step-sized slicing that takes most of the time.
For what it's worth, the matrices I am using it for are 4D, an example size of a matrix is 32x18x16x16 and I am inserting the alternate rows/cols only in the last two dimensions.
So my question is, is there another implementation with the same functionality but with reduced time?
| I am not familiar to Pytorch, but to accelerate the code that you provided, I think JAX library will help a lot. So, if:
import numpy as np
import jax
import jax.numpy as jnp
from functools import partial
a = np.arange(10000).reshape(100, 100)
b = jnp.array(a)
@partial(jax.jit, static_argnums=1)
def new(a, N):
out = jnp.zeros( (N+1)*np.array(a.shape)-N,dtype=a.dtype)
out = out.at[::N+1,::N+1].set(a)
return out
will improve the runtime by about 10 times on GPU. It depends on the array size and N (the larger the sizes, the better the performance). You can see benchmarks on my Colab link based on the 4 answers proposed so far (JAX beats the others).
I believe that JAX can be one of the best libraries for your case if you can adapt it to your problem (it is possible).
| https://stackoverflow.com/questions/72309710/ |
Fastai Regression model with observation weight | Is it possible to have a custom mean squared error function with a sample weight for each observation?
I am able to utilize the standard fastai training loop and I am able to implement this custom loss in PyTorch.
How do I pass that to a fastai learner object on tabular data?
I know keras already has this implemented in the .fit method, where a sample_weight argument is present.
def weighted_mse_loss(input, target, weight):
return torch.sum(weight * (input - target) ** 2)
from fastai.tabular.all import *
import seaborn as sns
df = sns.load_dataset('tips')
df = df.assign(sample_weight = np.random.normal(size = df.shape[0], loc = 10, scale = 2))
y = ['total_bill']
cont = ['tip']
cat = ['sex', 'smoker', 'day', 'time', 'size']
procs = [Normalize, Categorify]
df["Y"] = np.log(df[y] + 1)
MIN = df["Y"].min()
MAX = df["Y"].max()
splits = RandomSplitter(valid_pct=0.2)(range_of(df))
to = TabularPandas(
df,
procs=procs,
cat_names=cat,
cont_names=cont,
y_names="Y",
splits=splits,
y_block=RegressionBlock(n_out = 1),
)
dls = to.dataloaders(
bs=64, shuffle_train=True
)
config = tabular_config(
embed_p=0.05,
y_range=[0, MAX * 1.1],
bn_final=False,
ps=[0.05, 0.05, 0.05],
)
learner = tabular_learner(
dls,
layers=[1000, 500, 250],
config=config,
wd=0.2,
metrics=[rmse,],
)
learner.fit_one_cycle(40, lr_max = 0.01,
wd = 0.1)
| I am using this workaround:
in y_names for TabularPandas, you can return the tuple of (weight, y) as
to = TabularPandas(df,
procs=procs,
cat_names=cat,
cont_names=cont,
y_names=["sample_weight","Y"],
splits=splits,
y_block=RegressionBlock(n_out = 1))
In your loss function, split your target to (weights, target) and apply weights to loss, e.g.:
class SampleWeightedCE(torch.nn.modules.loss._Loss):
def __init__(self):
super(SampleWeightedCE, self).__init__()
self.ce_loss = torch.nn.BCEWithLogitsLoss(reduction='none')
def forward(self, output, tgt):
weights = tgt[:,0].unsqueeze(1)
target = tgt[:,1].unsqueeze(1)
losses = self.ce_loss(output, target) * weights
return torch.sum(losses) / torch.sum(weights)
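Since the question is about regression with MSE, a weighted MSE loss following the same (weight, Y) target layout might look like this (a sketch, not something from the original answer):
class SampleWeightedMSE(torch.nn.modules.loss._Loss):
    def __init__(self):
        super(SampleWeightedMSE, self).__init__()
    def forward(self, output, tgt):
        weights = tgt[:,0].unsqueeze(1)  # first target column holds the sample weights
        target = tgt[:,1].unsqueeze(1)   # second target column holds the actual y
        losses = weights * (output - target) ** 2
        return torch.sum(losses) / torch.sum(weights)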
If you want to measure metrics, you can use the same workaround, such as:
def accuracy_W(inp, tgt, thresh=0.5, sigmoid=True):
weights = tgt[:,0].unsqueeze(1)
target = tgt[:,1].unsqueeze(1)
if sigmoid: inp = inp.sigmoid()
classes = (inp >= thresh)
m_target = (target >= 0.5)
correct = (m_target == classes)
return torch.sum(weights * correct) / torch.sum(weights)
In get_preds() or predict(), you need to split the target
y_prob, y_out = learn.get_preds(ds_idx=1, with_input=False, with_loss=False, reorder=False)
weights = y_out[:,0]
target = y_out[:,1]
| https://stackoverflow.com/questions/72323732/ |
pytorch tensor change dimensionality to count adjacent values | My objective is to count all adjacent unique values of a tensor x.
Say my tensor is (x looks like a list but it is a pytorch tensor)
x = [1,2,1,2,4,5]
I would want my output to be:
[1,2] = 2
[2,1] = 1
[2,4] = 1
[4,5] = 1
I thought about changing the dimensionality of the tensor to look like:
x = [[1,2],[2,1],[1,2],[2,4],[4,5]]
using tensor.view but couldn't find a solution that works for a tensor of any length.
Any ideas if this is even the best way to go about this? Is there some built-in function?
| As @ihdv showed, you can stack shifted views of x with torch.stack or torch.vstack in order to get a tensor of pairs with overlapping windows.
>>> p = torch.vstack((x[:-1], x[1:]))
tensor([[1., 2., 1., 2., 4.],
[2., 1., 2., 4., 5.]])
Then you can apply torch.unique on it to get the statistics:
>>> p.unique(dim=1, return_counts=True)
(tensor([[1., 2., 2., 4.],
[2., 1., 4., 5.]]), tensor([2, 1, 1, 1]))
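If you want the counts in the mapping form shown in the question, a small follow-up sketch (not part of the original answer):
>>> pairs, counts = p.unique(dim=1, return_counts=True)
>>> {tuple(pairs[:, i].tolist()): counts[i].item() for i in range(pairs.shape[1])}
{(1.0, 2.0): 2, (2.0, 1.0): 1, (2.0, 4.0): 1, (4.0, 5.0): 1}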
| https://stackoverflow.com/questions/72327447/ |
How to get the input and output feature maps of a CNN model? | I am trying to find the dimensions of an image as it goes through a convolutional neural network at each layer. So for instance, if there is max-pooling or convolution being applied, I'd like to know the shape of the image at that layer, for all layers. I know I can use the nOut = (nIn + 2p - f)/s + 1 formula, but it would be too tedious and complex given the size of the PyTorch model. Is there a simple way to do this, perhaps a visualization tool/script or something?
| You can use the torchinfo library: https://github.com/TylerYep/torchinfo
Let's take their example:
from torchinfo import summary
model = ConvNet()
batch_size = 16
summary(model, input_size=(batch_size, 1, 28, 28))
Here (1, 28, 28) is the input's size, which is (channels, height, width) of the image respectively.
The library will print:
================================================================================================================
Layer (type:depth-idx) Input Shape Output Shape Param # Mult-Adds
================================================================================================================
SingleInputNet -- -- -- --
├─Conv2d: 1-1 [7, 1, 28, 28] [7, 10, 24, 24] 260 1,048,320
├─Conv2d: 1-2 [7, 10, 12, 12] [7, 20, 8, 8] 5,020 2,248,960
├─Dropout2d: 1-3 [7, 20, 8, 8] [7, 20, 8, 8] -- --
├─Linear: 1-4 [7, 320] [7, 50] 16,050 112,350
├─Linear: 1-5 [7, 50] [7, 10] 510 3,570
================================================================================================================
Total params: 21,840
Trainable params: 21,840
Non-trainable params: 0
Total mult-adds (M): 3.41
================================================================================================================
Input size (MB): 0.02
Forward/backward pass size (MB): 0.40
Params size (MB): 0.09
Estimated Total Size (MB): 0.51
================================================================================================================
I think 7 is wrong in this output though. It should be 16.
| https://stackoverflow.com/questions/72329948/ |
Matrix 2D on Convolutional Network | That may be a silly question, but I wanted to use a convolutional neural network in my deep reinforcement learning project and I got a problem I don't understand.
In my project I want to feed the network a 6x7 matrix, which should be equivalent to a black-and-white picture of size 6x7 (42 pixels), right?
class CNN(nn.Module):
def __init__(self):
super().__init__()
self.model = torch.nn.Sequential()
self.model.add_module("conv_1", torch.nn.Conv2d(in_channels=1, out_channels=16, kernel_size=4, stride = 1))
self.model.add_module("relu_1", torch.nn.ReLU())
self.model.add_module("max_pool", torch.nn.MaxPool2d(2))
self.model.add_module("conv_2", torch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=4, stride = 1))
self.model.add_module("relu_2", torch.nn.ReLU())
self.model.add_module("flatten", torch.nn.Flatten())
self.model.add_module("linear", torch.nn.Linear(in_features=16*16*16, out_features=7))
def forward(self, x):
x = self.model(x)
return x
In conv1, in_channels=1 because I have only 1 matrix (for image recognition that would mean 1 color channel). The other in_channels and out_channels are somewhat arbitrary up to the linear layer. I have no idea where I should insert the size of the matrix, but the final output should be of size 7, which I typed into the linear layer.
The error i get is:
RuntimeError: Expected 3D (unbatched) or 4D (batched) input to conv2d, but got input of size: [6, 7]
| There are a few problems with your code. First, the reason you're getting that error message is because the CNN is expecting a tensor with shape (N, Cin, Hin, Win), where:
N is the batch size
Cin is the number of input channels
Hin is the input image pixel height
Win is the input image pixel width
You're only providing the width and height dimensions. You need to explicitly add a channels and batch dimension, even if the value of those dimensions is only 1:
model = CNN()
example_input = torch.randn(size=(6, 7)) # this is your input image
print(example_input.shape) # should be (6, 7)
output = model(example_input) # you original error
example_input = example_input.unsqueeze(0).unsqueeze(0) # adds batch and channels dimension
print(example_input.shape) # should now be (1, 1, 6, 7)
output = model(example_input) # no more error!
You'll note however, you get a different error now:
RuntimeError: Calculated padded input size per channel: (1 x 2). Kernel size: (4 x 4). Kernel size can't be greater than actual input size
This is because after the first conv layer, your data is of shape 1x2, but your kernel size for the second layer is 4, which makes the operation impossible. An input image of size 6x7 is quite small; either reduce the kernel size to something that works, or use larger images.
Here's a working example:
import torch
from torch import nn
class CNN(nn.Module):
def __init__(self):
super().__init__()
self.model = torch.nn.Sequential()
self.model.add_module(
"conv_1",
torch.nn.Conv2d(in_channels=1, out_channels=16, kernel_size=2, stride=1),
)
self.model.add_module("relu_1", torch.nn.ReLU())
self.model.add_module("max_pool", torch.nn.MaxPool2d(2))
self.model.add_module(
"conv_2",
torch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=2, stride=1),
)
self.model.add_module("relu_2", torch.nn.ReLU())
self.model.add_module("flatten", torch.nn.Flatten())
self.model.add_module("linear", torch.nn.Linear(in_features=32, out_features=7))
def forward(self, x):
x = self.model(x)
return x
model = CNN()
x = torch.randn(size=(6, 7))
x = x.unsqueeze(0).unsqueeze(0)
output = model(x)
print(output.shape) # has shape (1, 7)
Note, I changed the kernel_size to 2, and the final linear layer has an input size of 32. Also, the output has shape (1, 7), the 1 is the batch_size, which in our case was only 1. If you want just the 7 output features, just use x = torch.squeeze(x).
| https://stackoverflow.com/questions/72331271/ |
Why do mini-batches larger than 1 not work, while accumulating gradients does? | I am trying to implement a neural network approximating the logical XOR function; however, the network only converges when using a batch size of 1.
I don't understand why: when I use gradient accumulation with multiple mini-batches of size 1, the convergence is very smooth, but mini-batches of size 2 or more don't work at all.
This issue arises whatever the learning rate, and I have the same issue with another (more complex) problem besides XOR.
I attach my code for reference:
import numpy as np
import torch.nn as nn
import torch
import torch.optim as optim
import copy
#very simple network
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(2,3,True)
self.fc1 = nn.Linear(3,1, True)
def forward(self, x):
x = torch.sigmoid(self.fc(x))
x = self.fc1(x)
return x
def data(n): # return n sets of random XOR inputs and output
inputs = np.random.randint(0,2,2*n)
inputs = np.reshape(inputs,(-1,2))
outputs = np.logical_xor(inputs[:,0], inputs[:,1])
return torch.tensor(inputs, dtype = torch.float32),torch.tensor(outputs, dtype = torch.float32)
N = 4
net = Net() # first network, is updated with minibatches of size N
net1 = copy.deepcopy(net) # second network, updated with N minibatches of size 1
inputs = torch.tensor([[0,0],[0,1],[1,0],[1,1]], dtype = torch.float32)
labels = torch.tensor([0,1,1,0], dtype = torch.float32)
optimizer = optim.SGD(net.parameters(), lr=0.01)
optimizer1 = optim.SGD(net1.parameters(), lr=0.01)
running_loss = 0
running_loss1 = 0
for epoch in range(25000): # loop over the dataset multiple times
# get the inputs; data is a list of [inputs, labels]
input, labels = data(N)
# zero the parameter gradients
optimizer.zero_grad()
optimizer1.zero_grad()
# forward + backward + optimize
loss1_total = 0
for i in range(N):
outputs1 = net1(input[i])
loss1 = (outputs1-labels[i]).pow(2)/N # I divide by N to get the effective mean
loss1.backward()
loss1_total += loss1.item()
outputs = net(input)
loss = (outputs-labels).pow(2).mean()
loss.backward()
# optimization
optimizer.step()
optimizer1.step()
# print statistics
running_loss += loss.item()
running_loss1 += loss1_total
if epoch % 1000 == 999: # print every 1000 mini-batches
print(f'[{epoch + 1}, loss: {running_loss/1000 :.3f}, loss1: {running_loss1/1000 :.3f}')
running_loss1 = 0.0
running_loss = 0.0
print('Finished Training')
# exemples of data and outputs for reference ; network 2 always converge to the sub-optimal point(0.5,0.5)
datatest = data(4)
outputs = net(datatest[0])
outputs1 = net1(datatest[0])
inputs = datatest[0]
labels = datatest[1]
print("input",inputs)
print("target",labels)
print("net output",outputs)
print("net output",outputs1)
[EDIT] Improved readability and updated the code
result :
[1000, loss: 0.259, loss1: 0.258
[2000, loss: 0.252, loss1: 0.251
[3000, loss: 0.251, loss1: 0.250
[4000, loss: 0.252, loss1: 0.250
[5000, loss: 0.251, loss1: 0.249
[6000, loss: 0.251, loss1: 0.247
[7000, loss: 0.252, loss1: 0.246
[8000, loss: 0.251, loss1: 0.244
[9000, loss: 0.252, loss1: 0.241
[10000, loss: 0.251, loss1: 0.236
[11000, loss: 0.252, loss1: 0.230
[12000, loss: 0.252, loss1: 0.221
[13000, loss: 0.250, loss1: 0.208
[14000, loss: 0.251, loss1: 0.193
[15000, loss: 0.251, loss1: 0.175
[16000, loss: 0.251, loss1: 0.152
[17000, loss: 0.252, loss1: 0.127
[18000, loss: 0.251, loss1: 0.099
[19000, loss: 0.251, loss1: 0.071
[20000, loss: 0.251, loss1: 0.048
[21000, loss: 0.251, loss1: 0.029
[22000, loss: 0.251, loss1: 0.016
[23000, loss: 0.250, loss1: 0.008
[24000, loss: 0.251, loss1: 0.004
[25000, loss: 0.251, loss1: 0.002
Finished Training
input tensor([[1., 0.],
[0., 0.],
[0., 0.],
[0., 0.]])
target tensor([1., 0., 0., 0.])
net output tensor([[0.4686],
[0.4472],
[0.4472],
[0.4472]], grad_fn=<AddmmBackward0>)
net1 output tensor([[0.9665],
[0.0193],
[0.0193],
[0.0193]], grad_fn=<AddmmBackward0>)
Please, could you explain to me why this strange phenomenon is appearing? I searched for a long time on the net, without success...
Excuse me if my question is not well formatted; it is the first time I am asking a question on Stack Overflow.
EDIT :
Comparing the accumulated gradients from size-1 minibatches with the gradients from minibatches of size N, I found that the computed gradients are mostly the same; only small (but noticeable) differences appear, probably due to floating-point errors, so my implementation looks fine at first sight. I still don't get where this strong convergence property of size-1 minibatches comes from.
| The problem lies in the way you define labels / compute the loss in
loss = (outputs-labels).pow(2).mean()
We have labels.shape = [4] but outputs.shape = [4, 1]. Due to broadcasting, the difference
(outputs - labels).shape = [4, 4]
which means we compute all pairwise differences between outputs and labels (and then take their 2nd power and average them), which basically means that no meaningful supervision takes place.
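You can see this concretely with a minimal sketch:
import torch
outputs = torch.zeros(4, 1)              # same shape as the network output
labels = torch.tensor([0., 1., 1., 0.])  # shape [4]
print((outputs - labels).shape)          # torch.Size([4, 4]) -- every pairwise difference
print((outputs - labels[:, None]).shape) # torch.Size([4, 1]) -- elementwise, as intended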
The quick way to fix that here would be adding a dummy dimension here:
loss = (outputs-labels[:, None]).pow(2).mean()
but the clean way would be doing it correctly right from the start, that is, defining your labels in a way such that labels.shape = [_, 1]:
labels = torch.tensor([[0], [1], [1], [0]], dtype=torch.float32)
(and similar in your data() function).
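For the data() function, that would mean keeping a trailing dimension on the outputs, for example (a sketch of one way to do it, not the author's exact code):
def data(n):
    inputs = np.random.randint(0, 2, 2*n).reshape(-1, 2)
    outputs = np.logical_xor(inputs[:, 0], inputs[:, 1]).reshape(-1, 1)  # shape [n, 1] instead of [n]
    return torch.tensor(inputs, dtype=torch.float32), torch.tensor(outputs, dtype=torch.float32)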
| https://stackoverflow.com/questions/72331452/ |
How can I make my CNN output_feature = 1 since I am testing for 0 or 1 | class BreastCancerCNN(ImageClassificationBase):
def __init__(self):
super().__init__()
self.network = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: 64 x 16 x 16
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: 128 x 8 x 8
nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: 256 x 4 x 4
nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: 512 x 2 x 2
nn.Flatten(),
nn.Linear(256*2*2, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 1)
)
def forward(self, xb):
return self.network(xb)
@torch.no_grad()
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
print(outputs)
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
# Set up cutom optimizer with weight decay
optimizer = opt_func(model.parameters(), lr, weight_decay=0)
# Set up one-cycle learning rate scheduler
sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
steps_per_epoch=len(train_loader))
for epoch in range(epochs):
train_losses = []
lrs = []
# Training Phase
model.train()
train_losses = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Record & update learning rate
lrs.append(get_lr(optimizer))
sched.step()
# Validation phase
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
result['lrs'] = lrs
model.epoch_end(epoch, result)
# print(result)
history.append(result)
return history
Get data from the history after running one epoch
history = [evaluate(model, val_dl)]
history
This is the code that results in
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/tmp/ipykernel_33/1481765314.py in <module>
1 # Get data from the histrory after running one epoch
----> 2 history = [evaluate(model, val_dl)]
3 history
/tmp/ipykernel_33/559867535.py in evaluate(model, val_loader)
7 def evaluate(model, val_loader):
8 model.eval()
----> 9 outputs = [model.validation_step(batch) for batch in val_loader]
10 print(outputs)
11 return model.validation_epoch_end(outputs)
/tmp/ipykernel_33/559867535.py in <listcomp>(.0)
7 def evaluate(model, val_loader):
8 model.eval()
----> 9 outputs = [model.validation_step(batch) for batch in val_loader]
10 print(outputs)
11 return model.validation_epoch_end(outputs)
/tmp/ipykernel_33/1785597948.py in validation_step(self, batch)
8 def validation_step(self, batch):
9 images, labels = batch
---> 10 out = self(images) # Generate predictions
11 loss = F.cross_entropy(out, labels) # Calculate loss
12 acc = accuracy(out, labels) # Calculate accuracy
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
/tmp/ipykernel_33/3839242004.py in forward(self, xb)
45
46 def forward(self, xb):
---> 47 return self.network(xb)
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(self, input)
139 def forward(self, input):
140 for module in self:
--> 141 input = module(input)
142 return input
143
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
/opt/conda/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
101
102 def forward(self, input: Tensor) -> Tensor:
--> 103 return F.linear(input, self.weight, self.bias)
104
105 def extra_repr(self) -> str:
RuntimeError: mat1 and mat2 shapes cannot be multiplied (100x2048 and 1024x512)
I would like to analyse images for a defect, and thus have a scale of 0 to 1 so as to evaluate the presence of a defect in the sample images.
Please help me break it down and understand what is actually going on, and how I determine the right values for the linear layers to get a single output that will result in a prediction of 1 or 0.
| Without going fully through your code, but looking at the comments and error messages, one can see that for the last maxpool layer:
nn.MaxPool2d(2, 2), # output: 512 x 2 x 2
That is, the number of features per image is 512 x 2 x 2 = 2048.
This is then flattened and passed through this linear layer:
nn.Linear(256*2*2, 512),
But this only accepts 256 x 2 x 2 = 1024 input features. This is what causes the mismatch when calling forward for this linear layer. To fix this, change this statement to:
nn.Linear(512*2*2, 512),
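If you would rather not work out the flattened size by hand, one common trick is to run a dummy tensor through the convolutional part of the network once (a sketch, not part of the original answer; conv_part stands for everything in self.network up to and including nn.Flatten(), and 32x32 is the assumed input size implied by the 2x2 feature map):
def infer_flat_features(conv_part, input_shape=(3, 32, 32)):
    with torch.no_grad():
        dummy = torch.zeros(1, *input_shape)  # one fake image of your training size
        return conv_part(dummy).shape[1]      # flattened feature count to feed the first nn.Linear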
| https://stackoverflow.com/questions/72332428/ |
Conda install conflicting requirements | I am attempting to use Conda to create an environment from a Pip requirements file. The contents of the file are
requirements.txt
numpy==1.18.2
torch==1.4.0
torchvision==0.5.0
scikit-learn==0.22.2.post1
Pillow==8.3.2
pydicom==1.4.2
pandas==1.0.3
Running the command
conda create -n $name --file requirements.txt
gives a PackageNotFound error as the channels are missing.
How do I amend this?
| Possible Issues
There are a few potential issues.
Conda pytorch
First, not all packages in Conda go by the same name as they do in other repositories. Part of this is due to the nature of Conda being a general package repository, rather than a language-specific one. In particular, the torch module is delivered via the Conda pytorch package.
So that has to change.
NumPy version unavailable
That particular build of NumPy does not appear to be available in either defaults or conda-forge channels.
$ mamba search numpy=1.18.2
No match found for: numpy=1.18.2. Search: *numpy*=1.18.2
PackagesNotFoundError: The following packages are not available from current channels:
- numpy=1.18.2
Current channels:
- https://conda.anaconda.org/conda-forge/osx-64
- https://conda.anaconda.org/conda-forge/noarch
- https://conda.anaconda.org/bioconda/osx-64
- https://conda.anaconda.org/bioconda/noarch
- https://repo.anaconda.com/pkgs/main/osx-64
- https://repo.anaconda.com/pkgs/main/noarch
- https://repo.anaconda.com/pkgs/r/osx-64
- https://repo.anaconda.com/pkgs/r/noarch
Why would this happen? For most Python packages, Conda works downstream of the PyPI repository. When new releases come out, the Conda Forge bot (for example) will auto-generate a pull request to corresponding feedstock. Sometimes these don't "just work" and need some troubleshooting to get built. Occasionally, the process to get the builds working won't finish before a new release hits. This results in a newer pull request superceding the previous one, and can lead to the old pull request being abandoned. This results in gaps in the coverage of PyPI by Conda Forge, and is exactly what happened here.
If you can tolerate a different version, conda-forge does provide v1.18.1 (below) and v1.18.4 (above).
Otherwise, if you require exact replication of package versions, then you will have to source this from PyPI. I'll show this in the end.
Channel issues
Missing channels
OP does not indicate the channel configuration. The torchvision==0.5.0 package, for example, only is available through the pytorch channel.
Masked channels
Another issue here could be the use of the channel_priority: strict setting. If this setting were used, it is possible a channel with the version required might be a priori excluded by the SAT solver simply because the package (but not the correct version) is available in a higher priority channel. These days channel_priority: flexible is the default and can be set with:
conda config --set channel_priority flexible
Solutions
Exact replication (PyPI only)
Give the package names and versions, these packages likely originated from PyPI. If you need to exactly replicate the original environment - say, for reproducing scientific results - then I'd recommend sourcing everything from PyPI. The best way to do this is to use Conda to source Python and Pip, then let Pip install the requirements.txt.
Judging from the package versions, we're talking Python 3.7 or 3.8. You'd probably be fine with just python=3.8, but a precise guesstimate from release dates would be python=3.8.2. So, try something like:
environment.yaml
name: my_env
channels:
- conda-forge
dependencies:
- python=3.8.2
- pip
- pip:
- -r requirements.txt
Then create the environment with
conda env create -n $name -f environment.yaml
making sure the requirements.txt is in the folder with the YAML.
If adding packages to this environment later, I would recommend only using pip install. Otherwise, Conda may have issues.
Conda-only environment
Assuming the numpy=1.18.2 can be substituted, a Conda-only environment might be something like:
environment.yaml
name: my_env
channels:
- pytorch
- conda-forge
dependencies:
- python=3.8
- numpy=1.18.1 # alternatively, 1.18.4
- pytorch=1.4.0
- torchvision=0.5.0
- scikit-learn=0.22.2.post1
- pillow=8.3.2
- pydicom=1.4.2
- pandas=1.0.3
Again, creating with:
conda env create -n $name -f environment.yaml
Note that in YAML only one = is used. This would be the best approach if you plan to install additional packages through Conda in an ad hoc manner (e.g., conda install).
Mixed Conda-Pip environment
You could also try a mixed environment mostly similar to the last one, but having Pip specifically provide numpy==1.18.2. I wouldn't recommend this, since the other dependencies will definitely bring in NumPy first from Conda, and then Pip will clobber it to provide the exact version.
| https://stackoverflow.com/questions/72334363/ |
Runtime Error: mat1 and mat2 shapes cannot be multiplied (62x2304 and 1568x3) | I am unable to find the error. The input is 32*32 gray images:
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels=1, # gray-scale images
out_channels=16,
kernel_size=5, # 5x5 convolutional kernel
stride=1, #no. of pixels pass at a time
padding=2, # to preserve size of input image
),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(16, 32, 5, 1, 2),
nn.ReLU(),
nn.MaxPool2d(2),
)
# fully connected layers
self.out = nn.Linear(32*7*7, 3)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
# flatten the output of conv2
x = x.view(x.size(0), -1)
output = self.out(x)
return output
cnn=CNN()
cnn
| Your linear layer expects input of size 32x7x7. Given that your conv1 and conv2 layers perform max pooling with stride=2, that means your network is configured for an input size of 28x28 (the usual MNIST input size) and not 32x32 as you expect.
Moreover, considering the values in your error message (62x2304) — where 62 is your batch dimension — your images are NOT 32x32, but rather 32x(something slightly larger than 32), resulting in a feature map of 32x8x9 = 2304 after the pooling.
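For reference, a sketch (not part of the original answer) of how the last layer would need to change in each case:
# If the inputs really are 32x32: 32 -> 16 -> 8 after the two MaxPool2d(2) layers
self.out = nn.Linear(32*8*8, 3)
# If the inputs keep producing an 8x9 feature map (the 2304 = 32*8*9 in the error message),
# either match it with nn.Linear(32*8*9, 3) or resize/crop the images to exactly 32x32 first.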
| https://stackoverflow.com/questions/72336317/ |
Different results in computing KL Divergence using Pytorch Distributions vs manually | I noticed the KL-Divergence term KL(Q(x)||P(x)) is computed differently when using
mean(Q(x)*(log Q(x) - log P(x)))
vs
torch.distributions.kl_divergence(Q, P)
where
Q = torch.distributions.Normal(some mean, some sigma)
P = torch.distributions.Normal(0, 1)
and when I plot the KL-divergence losses, I get these two similar but different plots:
here
Can anyone point out what is causing this difference?
The full code is below:
import numpy as np
import torch
import torch.distributions as dist
import matplotlib.pyplot as plt
def kl_1(log_qx, log_px):
"""
inputs: [B, z_dim] torch
"""
return (log_qx.exp() * (log_qx-log_px)).mean()
# ground-truth (target) P(x)
P = dist.Normal(0, 1)
mus = np.arange(-5, 5, 0.1)
sigma = 1
N = 100
kls = {"1": [], "2": []}
for mu in mus:
# prediction (current) Q(x)
Q = dist.Normal(mu, sigma)
# sample from Q
qx = Q.sample((N,))
# log prob
log_qx = Q.log_prob(qx)
log_px = P.log_prob(qx)
# kl 1
kl1 = kl_1(log_qx, log_px)
kls['1'].append(kl1.numpy())
# kl 2
kl2 = dist.kl_divergence(Q, P)
kls['2'].append(kl2.numpy())
plt.figure()
plt.scatter(mus, kls['1'], label="Q*(logQ-logP)")
plt.scatter(mus, kls['2'], label="kl_divergence")
plt.xlabel("mean of Q(x)")
plt.ylabel("computed KL Divergence")
plt.legend()
plt.show()
| You weight the sample by the probability density when you compute the expected value as an integral over dx. If you are instead using samples drawn from the given distribution, then you approximate the expected value as the mean directly; that corresponds to integrating over dCQ(x), with dCQ(x) = q(x) dx, where CQ(x) is the cumulative distribution function and q(x) is the probability density function of the variable Q. In other words, since the samples are already drawn from Q, the extra factor log_qx.exp() in kl_1 double-counts the density; the Monte-Carlo estimate of the KL divergence is simply the mean of (log_qx - log_px):
import numpy as np
import torch
import torch.distributions as dist
import matplotlib.pyplot as plt
def kl_1(log_qx, log_px):
"""
inputs: [B, z_dim] torch
"""
return (log_qx-log_px).mean()
# ground-truth (target) P(x)
P = dist.Normal(0, 1)
mus = np.arange(-5, 5, 0.1)
sigma = 1
N = 100
kls = {"1": [], "2": []}
for mu in mus:
# prediction (current) Q(x)
Q = dist.Normal(mu, sigma)
# sample from Q
qx = Q.sample((N,))
# log prob
log_qx = Q.log_prob(qx)
log_px = P.log_prob(qx)
# kl 1
kl1 = kl_1(log_qx, log_px)
kls['1'].append(kl1.numpy())
# kl 2
kl2 = dist.kl_divergence(Q, P)
kls['2'].append(kl2.numpy())
plt.figure()
plt.scatter(mus, kls['1'], label="Q*(logQ-logP)")
plt.scatter(mus, kls['2'], label="kl_divergence")
plt.xlabel("mean of Q(x)")
plt.ylabel("computed KL Divergence")
plt.legend()
| https://stackoverflow.com/questions/72340872/ |
How to apply the parameters (weights of a network) to a vector of inputs using PyTorch? | I have a simple "Neural Network" that takes two inputs and computes a linear combination and outputs a scalar. In the case of one single equation, i.e., in 1D, we would have: weight1*inputA + weight2*inputB = output. Now, I am interested in multi-dimensional inputs, i.e., that inputA is a vector instead. So I want my network to apply the weight1 to a vector, for example: weight1 * [input1, input2] + weight2 * [input3, input4]. In this setting, I would want the output to be a vector too: [out1, out2], where out1 = weight1*inputA + weight2*inputB. However, I don't want to change the input and output dimensions of my network. As in the 1D case, I would initialise the network as net = LinearNet(2,1), since we take two inputs, inputA and inputB, and get one output. I understand that the input and output itself are multi-dimensional, but this should not bother my network.
Below is a min. working example:
import numpy as np
import torch
from torch import nn
def f(x,a,b,c,d):
x_next = np.zeros((2,))
x_next[0] = x[0]*(a-b*x[1])
x_next[1] = -x[1]*(c-d*x[0])
return x_next #returns [2,:]
a = 1
b = 0.5
c = 1
d = 0.5
x01 = 1 #init cond. varA
x02 = 2 #init cond. varB
params = [a,b,c,d,x01,x02]
# ==================
h = 0.001
T = 10
K = int(T/h)
# forward euler approx.
x_traj_FE = np.zeros((2,K))
a, b, c, d, x_traj_FE[0,0], x_traj_FE[1,0] = params
for k in range(K-1):
x_traj_FE[:,k+1] = x_traj_FE[:,k] + h*f(x_traj_FE[:,k],a,b,c,d)
# ==================
class LinearNet(nn.Module):
def __init__(self, input_dim, output_dim):
super(LinearNet, self).__init__()
self.layer1 = nn.Linear(input_dim, output_dim, bias=False)
#no activation function
def forward(self, x):
x = self.layer1(x)
#no activation
return x
torch.manual_seed(789)
net = LinearNet(2,1)
x_traj = np.zeros((2,K))
a, b, c, d, x_traj[0,0], x_traj[1,0] = params
for k in range(K-1):
print(net.layer1.weight)
input1 = x_traj[:,k]
input2 = f(x_traj[:,k],a,b,c,d)
inputs = torch.Tensor(np.array([input1, input2]))
print('input: '+str(inputs))
print('shape: '+str(inputs.shape))
print('output: '+str(net(inputs)))
break
If we run this, we will receive the following output:
Parameter containing:
tensor([[0.3871, 0.5595]], requires_grad=True)
input: tensor([[ 1., 2.],
[ 0., -1.]])
shape: torch.Size([2, 2])
output: tensor([[ 1.5060],
[-0.5595]], grad_fn=<MmBackward0>)
Eventually, I would like to compute one forward euler step of the coupled ode f, where the input1 is the initial conditions of each equation within the system f, and input2 is the system evaluated at the previous step. If we do it manually, one step can be calculated as (given the weights from the network above):
weights = [0.3871, 0.5595]
x_traj = np.zeros((2,K))
a, b, c, d, x_traj[0,0], x_traj[1,0] = params
for k in range(K-1):
x_traj[:,k+1] = weights[0]*x_traj[:,k] + weights[1]*f(x_traj[:,k],a,b,c,d)
break
x_traj
And the output is:
array([[1. , 0.3871, 0. , ..., 0. , 0. , 0. ],
[2. , 0.2147, 0. , ..., 0. , 0. , 0. ]])
As we see, the output of the network differs from the manual computation. I don't see how the network computes this scalar-vector multiplication and hence, I can't understand how to retrieve the same output as with the manual computation.
| When you do inputs = torch.Tensor(np.array([input1, input2])), you are creating a tensor where the first row is input1 and the second row is input2. When you are multiplying manually, your first column is equal to what was input1 previously, and similarly for input2. In order to get equivalent results to the first case, you could change the statement in the loop to (I do not know which of the two is logically correct in your use case, however):
inp = torch.stack([torch.as_tensor(x_traj[:,k]), torch.as_tensor(f(x_traj[:,k],a,b,c,d))], dim=0) # Same matrix as `inputs` in the first case (converted from numpy, since torch.stack expects tensors)
x_traj[:,k+1] = weights[0]*inp[:,0] + weights[1]*inp[:,1] # This is equivalent to calling the network in the first case
| https://stackoverflow.com/questions/72342641/ |
What does self.model(batch) do in pytorch? | I am currently rewriting a PyTorch code to tensorflow. During this I found a line that I don't understand, so I am not able to translate it to tensorflow.
Can someone explain to me what this does / means?
self.model(batch)
| It seems like self.model is a layer/layers of a neural network, derived from nn.Module class.
The call self.model(batch) invokes self.model's __call__ method with the argument batch.
If you inspect closely, nn.Module.__call__ does some "bookkeeping" (hooks, etc.), but essentially it calls self.model's forward function.
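A minimal sketch (not from the original answer) illustrating that calling a module runs its forward:
import torch
from torch import nn
class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 2)
    def forward(self, x):
        return self.linear(x)
model = TinyModel()
batch = torch.randn(8, 4)  # a batch of 8 samples with 4 features each
out = model(batch)         # nn.Module.__call__ -> TinyModel.forward(batch)
print(out.shape)           # torch.Size([8, 2])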
| https://stackoverflow.com/questions/72346268/ |
How to plot loss when using HuggingFace's Trainer? | While finetuning a model using HF's Trainer.
training_args = TrainingArguments(output_dir=data_dir + "test_trainer")
metric = load_metric("accuracy")
def compute_metrics(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
return metric.compute(predictions=predictions, references=labels)
training_args = TrainingArguments(num_train_epochs=5,per_device_train_batch_size=64,per_device_eval_batch_size=32,output_dir="test_trainer", evaluation_strategy="epoch")
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
compute_metrics=compute_metrics,
)
trainer.train()
How would I be able to plot the loss in a notebook?
(Perhaps Is it possible to get a list of the loss)
| It is possible to get a list of losses. You can access the history of logs after training is complete with: trainer.state.log_history
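A sketch (not part of the original answer) of how you might plot the training loss from that history in a notebook; note that each entry of log_history is a dict and only the training-log entries contain a "loss" key:
import matplotlib.pyplot as plt
history = trainer.state.log_history
steps = [h["step"] for h in history if "loss" in h]
losses = [h["loss"] for h in history if "loss" in h]
plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.show()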
| https://stackoverflow.com/questions/72350835/ |
Why is CUDA unavailable for use with easyocr? | Following the PyTorch instructions, I used this command in cmd:
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
But CUDA is still unavailable. Could someone help me, please?
import torch
print(torch.cuda.is_available())
The output will be False
| You have to update your NVIDIA driver first — the installed driver must support the CUDA version your PyTorch build targets (cu113 here):
Here is a concept diagram from the NVIDIA website, and here is another one:
More at CUDA Compatibility
| https://stackoverflow.com/questions/72352212/ |
Meaning of stacklevel in PyTorch | I came across many functions in PyTorch that have _stacklevel as an argument. Here is an example of the Softmax module's forward() method where it is used:
def forward(self, input: Tensor) -> Tensor:
return F.softmax(input, self.dim, _stacklevel=5)
What does _stacklevel mean? What is it good for?
| stacklevel is used in Python to tell the warning mechanism how far up the stack it has to go to find the line that called the function which issued the warning. For example, the code below makes the warning refer to deprecation()'s caller by using stacklevel=2, rather than to the source of deprecation() itself. stacklevel=3 would refer to the caller of deprecation()'s caller, and so on.
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=2)
See this page for more information.
Regarding the specific case you mention, in PyTorch's F.softmax, F.softmin, and F.log_softmax functions, this argument is related to the warning issued when dim is not specified. However, it seems that it should be dropped since legacy softmax dim behavior is gone, or at least clarified in the documentation. At the moment, this is only mentioned on the following open issues from pytorch repo:
pytorch/issues/36524
pytorch/issues/64038
It will probably be fixed or clarified in the future, but for the moment my recommendation is to simply ignore it.
| https://stackoverflow.com/questions/72354020/ |
Understanding gradient computation using backward() in PyTorch | I'm trying to understand the basic pytorch autograd system:
x = torch.tensor(10., requires_grad=True)
print('tensor:',x)
x.backward()
print('gradient:',x.grad)
output:
tensor: tensor(10., requires_grad=True)
gradient: tensor(1.)
since x is a scalar constant and no function is applied to it, I expected 0. as the gradient output. Why is the gradient 1. instead?
| Whenever you call value.backward(), you compute the derivative of value (in your case value == x) with respect to all your parameters (in your case that is just x). Roughly speaking, this means all tensors that are somehow involved in your computation and have requires_grad=True. So this means
x.grad = dx / dx = 1
To add to that: with automatic differentiation you only ever compute with "concrete" values: all your functions or networks are always evaluated at a concrete point, and the gradient you get is the gradient evaluated at that same point. There is no symbolic computation taking place. All the information needed for the computation of the gradient is encoded in the computation graph.
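For contrast, a quick sketch with a non-trivial function of x:
import torch
x = torch.tensor(10., requires_grad=True)
y = x ** 2     # now y is a function of x
y.backward()   # computes dy/dx evaluated at x = 10
print(x.grad)  # tensor(20.), i.e. 2 * x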
| https://stackoverflow.com/questions/72362774/ |
pytorch dataloader - RuntimeError: stack expects each tensor to be equal size, but got [157] at entry 0 and [154] at entry 1 | I am a beginner with pytorch. I am trying to do an aspect based sentiment analysis. I am facing the error mentioned in the subject. My code is as follows: I request help to resolve this error. Thanks in advance. I will share the entire code and the error stack.
!pip install transformers
import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
import torch
import numpy as np
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from collections import defaultdict
from textwrap import wrap
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
%matplotlib inline
%config InlineBackend.figure_format='retina'
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]
sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))
rcParams['figure.figsize'] = 12, 8
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
df = pd.read_csv("/Users/user1/Downloads/auto_bio_copy.csv")
I am importing a csv file which has content and label as shown below:
df.head()
content label
0 I told him I would leave the car and come back... O O O O O O O O O O O O O O O O O O O O O O O ...
1 I had the ignition interlock device installed ... O O O B-Negative I-Negative I-Negative O O O O...
2 Aug. 23 or 24 I went to Walmart auto service d... O O O O O O O B-Negative I-Negative I-Negative...
3 Side note This is the same reaction I 'd gotte... O O O O O O O O O O O O O O O O O O O O O O O ...
4 Locked out of my car . Called for help 215pm w... O O O O O O O O O O O O O O O O O B-Negative O...
df.shape
(1999, 2)
I am converting the label values into integers as follows:
O=zero(0), B-Positive=1, I-Positive=2, B-Negative=3, I-Negative=4, B-Neutral=5, I-Neutral=6, B-Mixed=7, I-Mixed=8
df['label'] = df.label.str.replace('O', '0')
df['label'] = df.label.str.replace('B-Positive', '1')
df['label'] = df.label.str.replace('I-Positive', '2')
df['label'] = df.label.str.replace('B-Negative', '3')
df['label'] = df.label.str.replace('I-Negative', '4')
df['label'] = df.label.str.replace('B-Neutral', '5')
df['label'] = df.label.str.replace('I-Neutral', '6')
df['label'] = df.label.str.replace('B-Mixed', '7')
df['label'] = df.label.str.replace('I-Mixed', '8')
Next, converting the string to integer list as follows:
df['label'] = df['label'].str.split(' ').apply(lambda s: list(map(int, s)))
df.head()
content label
0 I told him I would leave the car and come back... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
1 I had the ignition interlock device installed ... [0, 0, 0, 3, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
2 Aug. 23 or 24 I went to Walmart auto service d... [0, 0, 0, 0, 0, 0, 0, 3, 4, 4, 4, 0, 0, 0, 0, ...
3 Side note This is the same reaction I 'd gotte... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
4 Locked out of my car . Called for help 215pm w... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
PRE_TRAINED_MODEL_NAME = 'bert-base-cased'
tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
token_lens = []
for txt in df.content:
tokens = tokenizer.encode_plus(txt, max_length=512, add_special_tokens=True, truncation=True, return_attention_mask=True)
token_lens.append(len(tokens))
MAX_LEN = 512
class Auto_Bio_Dataset(Dataset):
def __init__(self, contents, labels, tokenizer, max_len):
self.contents = contents
self.labels = labels
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.contents)
def __getitem__(self, item):
content = str(self.contents[item])
label = self.labels[item]
encoding = self.tokenizer.encode_plus(
content,
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=False,
#padding='max_length',
pad_to_max_length=True,
truncation=True,
return_attention_mask=True,
return_tensors='pt'
)
return {
'content_text': content,
'input_ids': encoding['input_ids'].flatten(),
'attention_mask': encoding['attention_mask'].flatten(),
'labels': torch.tensor(label)
}
df_train, df_test = train_test_split(
df,
test_size=0.1,
random_state=RANDOM_SEED
)
df_val, df_test = train_test_split(
df_test,
test_size=0.5,
random_state=RANDOM_SEED
)
df_train.shape, df_val.shape, df_test.shape
((1799, 2), (100, 2), (100, 2))
def create_data_loader(df, tokenizer, max_len, batch_size):
ds = Auto_Bio_Dataset(
contents=df.content.to_numpy(),
labels=df.label.to_numpy(),
tokenizer=tokenizer,
max_len=max_len
)
return DataLoader(
ds,
batch_size=batch_size,
num_workers=2
)
BATCH_SIZE = 16
train_data_loader = create_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)
val_data_loader = create_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
test_data_loader = create_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
data = next(iter(train_data_loader))
data.keys()
Error is as follows:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-71-e0a71018e473> in <module>
----> 1 data = next(iter(train_data_loader))
2 data.keys()
~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/dataloader.py in __next__(self)
528 if self._sampler_iter is None:
529 self._reset()
--> 530 data = self._next_data()
531 self._num_yielded += 1
532 if self._dataset_kind == _DatasetKind.Iterable and \
~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/dataloader.py in _next_data(self)
1222 else:
1223 del self._task_info[idx]
-> 1224 return self._process_data(data)
1225
1226 def _try_put_index(self):
~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/dataloader.py in _process_data(self, data)
1248 self._try_put_index()
1249 if isinstance(data, ExceptionWrapper):
-> 1250 data.reraise()
1251 return data
1252
~/opt/anaconda3/lib/python3.7/site-packages/torch/_utils.py in reraise(self)
455 # instantiate since we don't know how to
456 raise RuntimeError(msg) from None
--> 457 raise exception
458
459
RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 52, in fetch
return self.collate_fn(data)
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 157, in default_collate
return elem_type({key: default_collate([d[key] for d in batch]) for key in elem})
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 157, in <dictcomp>
return elem_type({key: default_collate([d[key] for d in batch]) for key in elem})
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 138, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [157] at entry 0 and [154] at entry 1
I found in some GitHub post that this error can be caused by the batch size, so I changed the batch size to 8, and then the error is as follows:
BATCH_SIZE = 8
train_data_loader = create_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)
val_data_loader = create_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
test_data_loader = create_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
data = next(iter(train_data_loader))
data.keys()
RuntimeError Traceback (most recent call last)
<ipython-input-73-e0a71018e473> in <module>
----> 1 data = next(iter(train_data_loader))
2 data.keys()
~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/dataloader.py in __next__(self)
528 if self._sampler_iter is None:
529 self._reset()
--> 530 data = self._next_data()
531 self._num_yielded += 1
532 if self._dataset_kind == _DatasetKind.Iterable and \
~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/dataloader.py in _next_data(self)
1222 else:
1223 del self._task_info[idx]
-> 1224 return self._process_data(data)
1225
1226 def _try_put_index(self):
~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/dataloader.py in _process_data(self, data)
1248 self._try_put_index()
1249 if isinstance(data, ExceptionWrapper):
-> 1250 data.reraise()
1251 return data
1252
~/opt/anaconda3/lib/python3.7/site-packages/torch/_utils.py in reraise(self)
455 # instantiate since we don't know how to
456 raise RuntimeError(msg) from None
--> 457 raise exception
458
459
RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 52, in fetch
return self.collate_fn(data)
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 157, in default_collate
return elem_type({key: default_collate([d[key] for d in batch]) for key in elem})
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 157, in <dictcomp>
return elem_type({key: default_collate([d[key] for d in batch]) for key in elem})
File "/Users/namrathabhandarkar/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 137, in default_collate
out = elem.new(storage).resize_(len(batch), *list(elem.size()))
RuntimeError: Trying to resize storage that is not resizable
I am not sure what is causing the first error (the one mentioned in the subject). I am using padding and truncation in my code, yet the error persists.
Any help to resolve this issue is highly appreciated.
Thanks in advance.
| Quick answer: you need to implement your own collate_fn function when creating a DataLoader. See the discussion from PyTorch forum.
You should be able to pass the function object to DataLoader instantiation:
def my_collate_fn(data):
# TODO: Implement your function
# But I guess in your case it should be:
return tuple(data)
return DataLoader(
ds,
batch_size=batch_size,
num_workers=2,
collate_fn=my_collate_fn
)
This should be the way to solve this, but as a temporary remedy in case anything is urgent or you just want a quick test, simply change batch_size to 1 to prevent torch from trying to stack things with different shapes.
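In this particular case the mismatched sizes ([157] vs [154]) most likely come from the 'labels' tensors, since the per-token label lists have different lengths per example, while input_ids and attention_mask are already padded to 512. A collate_fn that pads the labels to the longest sequence in the batch might look like this (a sketch, assuming the dict keys from Auto_Bio_Dataset):
from torch.nn.utils.rnn import pad_sequence
def my_collate_fn(batch):
    return {
        'content_text': [item['content_text'] for item in batch],
        'input_ids': torch.stack([item['input_ids'] for item in batch]),
        'attention_mask': torch.stack([item['attention_mask'] for item in batch]),
        # labels vary in length per example, so pad them to the longest in the batch
        'labels': pad_sequence([item['labels'] for item in batch], batch_first=True, padding_value=0),
    }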
| https://stackoverflow.com/questions/72363741/ |
manually download pytorch and feed downloaded path to anaconda | I tried to conda install pytorch ... but it doesn't even start and gives me
("Connection broken: ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None)", ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
I'm thinking of downloading the necessary file manually (a safe download even if the internet is slow) and then pointing anaconda to it, so that it installs directly from the downloaded file instead of downloading it itself.
| This can be done by downloading pip wheels for torch, torchvision and torchaudio etc. and doing pip installation for local wheels. Suitable Pytorch wheels can be downloaded from here. Now, you can simply pip install the downloaded whl files:
pip install file.whl
Here file.whl is the downloaded wheel.
| https://stackoverflow.com/questions/72363762/ |
pytorch inplace runtime error with class inheritance | May I know why this forward() function gives a runtime error about an inplace operation?
Note: I have done some code debugging which leads to the following line of code:
class ConvEdge(Edge):
def __init__(self, stride):
super().__init__()
self.f = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=(3, 3), stride=(stride, stride), padding=1)
If you guys have a look at the code snippet for class ConvEdge(Edge), I am actually having second thoughts about how inheritance is viewed and processed by the PyTorch autograd library.
What do you guys think?
| The issue is resolved by using with torch.no_grad(), which basically does not track gradients when they are not necessary.
| https://stackoverflow.com/questions/72372542/ |
Multilabel classification using ResNet - loading sample using the dataloader takes a long time | I need to implement a ResNet-based multiclass classifier, and I am using this notebook as a starting point. At the moment, I am just going through the notebook, checking that all steps work fine. When I try to load a sample with the dataloader (command: sample = next(iter(train_loader))), I get no results even after waiting for more than an hour. Why is that?
The dataloader is defined in this cell:
#Pre-processing transformations
data_transforms = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))
])
#Getting the data
cardata = CarDataset("./content/content/carimages/car_ims", transform=data_transforms,translation_dict=translation_dict)
#Split the data in training and testing
train_len = int(cardata.__len__()*0.8)
test_len = int(cardata.__len__()*0.2)
train_set, val_set = torch.utils.data.random_split(cardata, [train_len, test_len])
#Create the dataloader for each dataset
train_loader = DataLoader(train_set, batch_size=16, shuffle=True,
num_workers=4, drop_last=True)
test_loader = DataLoader(val_set, batch_size=16, shuffle=False,
num_workers=4, drop_last=True)
If I try loading the data on the GPU using
train_set.cardata.to(torch.device("cuda:0")) # put data into GPU entirely
train_set.to(torch.device("cuda:0"))
I get the error 'Subset' object has no attribute 'cardata'. Am I doing something wrong? Is it just normal that the dataloader takes so long to load the dataset images? Thanks!
| There seems to be no error in how the provided code builds the train/val sets and loaders. There might be an error inside the CarDataset() class, or maybe setting num_workers=0 could help.
Also, please avoid acquiring a sample via next(iter(train_loader)): this creates a new iterator every time it is called. Use train_iter = iter(train_loader) and inputs, labels = next(train_iter) instead.
Additionally, as far as I know, putting the entire dataset onto a GPU (or GPUs) this way is not supported. You can move a mini-batch (a PyTorch tensor) to it: inputs = inputs.to(torch.device("cuda"))
Any advice or corrections to a wrong answer are welcome.
| https://stackoverflow.com/questions/72373689/ |
Change all images in training set | I have a convolutional neural network, and I wanted to train it on images from the training set, but first they should be wrapped with my function change(tensor, float), which takes in a tensor/image of the form [height, width, 3] and a float.
Batch size =4
loading data
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=2)
Cnn architecture
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
#size of inputs [4,3,32,32]
#size of labels [4]
inputs = change(inputs,0.1) <----------------------------
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs) #[4, 10]
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
running_loss = 0.0
print('Finished Training')
I am trying to apply the image function change, but it gives an object error.
Is there a quick way to fix it?
I am using a Julia function, but it works completely fine with other objects. Error message:
JULIA: MethodError: no method matching copy(::PyObject)
Closest candidates are:
copy(!Matched::T) where T<:SHA.SHA3_CTX at /opt/julia-1.7.2/share/julia/stdlib/v1.7/SHA/src/types.jl:213
copy(!Matched::T) where T<:SHA.SHA2_CTX at /opt/julia-1.7.2/share/julia/stdlib/v1.7/SHA/src/types.jl:212
copy(!Matched::Number) at /opt/julia-1.7.2/share/julia/base/number.jl:113
| I would recommend putting the change function into the transforms list, so that the data changes happen at the transformation stage.
partial from functools will help you fix the number of arguments, like this:
from functools import partial
from torchvision.transforms import Compose, RandomResizedCrop, ToTensor  # used below
def change(input, float):
pass
# Use partial to fix number of params, such that change accepts only input
change_partial = partial(change, float=pass_float_value_here)
# Add change_partial to a list of transforms before or after converting to tensors
transforms = Compose([
RandomResizedCrop(img_size), # example
# Add change_partial here if it operates on PIL Image
change_partial,
ToTensor(), # convert to tensor
# Add change_partial here if it operates on torch tensors
change_partial,
])
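The composed transforms can then be passed to the dataset exactly as in your existing code (a sketch):
import torchvision
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transforms)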
| https://stackoverflow.com/questions/72378315/ |
Reshape tensors in pytorch? | I'm struggling with the result of a matrix multiplication in pytorch and I don't know how to solve it, in particular:
I'm multiplying these two matrices
tensor([[[[209.5000, 222.7500],
[276.5000, 289.7500]],
[[208.5000, 221.7500],
[275.5000, 288.7500]]]], dtype=torch.float64)
and
tensor([[[[ 0., 1., 2., 5., 6., 7., 10., 11., 12.],
[ 2., 3., 4., 7., 8., 9., 12., 13., 14.],
[10., 11., 12., 15., 16., 17., 20., 21., 22.],
[12., 13., 14., 17., 18., 19., 22., 23., 24.]],
[[25., 26., 27., 30., 31., 32., 35., 36., 37.],
[27., 28., 29., 32., 33., 34., 37., 38., 39.],
[35., 36., 37., 40., 41., 42., 45., 46., 47.],
[37., 38., 39., 42., 43., 44., 47., 48., 49.]],
[[50., 51., 52., 55., 56., 57., 60., 61., 62.],
[52., 53., 54., 57., 58., 59., 62., 63., 64.],
[60., 61., 62., 65., 66., 67., 70., 71., 72.],
[62., 63., 64., 67., 68., 69., 72., 73., 74.]]]],
dtype=torch.float64)
with the following line of code A.view(2,-1) @ B, and then I reshape the result with result.view(2, 3, 3, 3).
The resulting matrix is
tensor([[[[ 6687.5000, 7686.0000, 8684.5000],
[11680.0000, 12678.5000, 13677.0000],
[16672.5000, 17671.0000, 18669.5000]],
[[ 6663.5000, 7658.0000, 8652.5000],
[11636.0000, 12630.5000, 13625.0000],
[16608.5000, 17603.0000, 18597.5000]],
[[31650.0000, 32648.5000, 33647.0000],
[36642.5000, 37641.0000, 38639.5000],
[41635.0000, 42633.5000, 43632.0000]]],
[[[31526.0000, 32520.5000, 33515.0000],
[36498.5000, 37493.0000, 38487.5000],
[41471.0000, 42465.5000, 43460.0000]],
[[56612.5000, 57611.0000, 58609.5000],
[61605.0000, 62603.5000, 63602.0000],
[66597.5000, 67596.0000, 68594.5000]],
[[56388.5000, 57383.0000, 58377.5000],
[61361.0000, 62355.5000, 63350.0000],
[66333.5000, 67328.0000, 68322.5000]]]], dtype=torch.float64)
Instead I want
tensor([[[[ 6687.5000, 7686.0000, 8684.5000],
[11680.0000, 12678.5000, 13677.0000],
[16672.5000, 17671.0000, 18669.5000]],
[[31650.0000, 32648.5000, 33647.0000],
[36642.5000, 37641.0000, 38639.5000],
[41635.0000, 42633.5000, 43632.0000]],
[[56612.5000, 57611.0000, 58609.5000],
[61605.0000, 62603.5000, 63602.0000],
[66597.5000, 67596.0000, 68594.5000]]],
[[[ 6663.5000, 7658.0000, 8652.5000],
[11636.0000, 12630.5000, 13625.0000],
[16608.5000, 17603.0000, 18597.5000]],
[[31526.0000, 32520.5000, 33515.0000],
[36498.5000, 37493.0000, 38487.5000],
[41471.0000, 42465.5000, 43460.0000]],
[[56388.5000, 57383.0000, 58377.5000],
[61361.0000, 62355.5000, 63350.0000],
[66333.5000, 67328.0000, 68322.5000]]]], dtype=torch.float64)
Can someone help me? Thanks
 | This is a common but interesting problem because it involves a combination of torch.reshape and torch.transpose to solve it. More specifically, you will need to:
Apply an initial reshape to restructure the tensor and expose the axes you want to swap;
Then do so using a transpose operation;
Lastly apply a second reshape to get to the desired format.
In your case, you could do:
>>> result.reshape(3,2,3,3).transpose(0,1).reshape(2,3,3,3)
tensor([[[[ 6687.5000, 7686.0000, 8684.5000],
[11680.0000, 12678.5000, 13677.0000],
[16672.5000, 17671.0000, 18669.5000]],
[[31650.0000, 32648.5000, 33647.0000],
[36642.5000, 37641.0000, 38639.5000],
[41635.0000, 42633.5000, 43632.0000]],
[[56612.5000, 57611.0000, 58609.5000],
[61605.0000, 62603.5000, 63602.0000],
[66597.5000, 67596.0000, 68594.5000]]],
[[[ 6663.5000, 7658.0000, 8652.5000],
[11636.0000, 12630.5000, 13625.0000],
[16608.5000, 17603.0000, 18597.5000]],
[[31526.0000, 32520.5000, 33515.0000],
[36498.5000, 37493.0000, 38487.5000],
[41471.0000, 42465.5000, 43460.0000]],
[[56388.5000, 57383.0000, 58377.5000],
[61361.0000, 62355.5000, 63350.0000],
[66333.5000, 67328.0000, 68322.5000]]]], dtype=torch.float64)
I encourage you to look at the intermediate results to get an idea of how the method works, so you can apply it to other use cases in the future.
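For instance, printing the shapes of the intermediate steps (a quick sketch) shows what each operation contributes:
step1 = result.reshape(3, 2, 3, 3)   # regroup so the two axes to swap become separate dims
step2 = step1.transpose(0, 1)        # swap them -> shape (2, 3, 3, 3), memory order unchanged
step3 = step2.reshape(2, 3, 3, 3)    # same shape, but now laid out contiguously in the new order
print(step1.shape, step2.shape, step3.shape)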
| https://stackoverflow.com/questions/72380259/ |
Pytorch TTS how to add output to audio file? | This is code from this repo: https://github.com/snakers4/silero-models#pytorch-1
I'd like to output the voice to an audio file, how can I do that?
code:
# V3
import torch
language = 'ru'
model_id = 'ru_v3'
sample_rate = 48000
speaker = 'ksenia'
device = torch.device('cpu')
model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models',
model='silero_tts',
language=language,
speaker=model_id)
model.to(device) # gpu or cpu
example_text = 'Привет, как делишки?'
audio = model.apply_tts(text=example_text,
speaker=speaker,
sample_rate=sample_rate)
I heard about this, but it doesn't work with v_2 or v_3
torchaudio.save('test_1.mp3',
audio[0].unsqueeze(0),
sample_rate=16000)
 | Just remove [0],
so it will look like this:
torchaudio.save('test_1.mp3',
audio.unsqueeze(0),
sample_rate=16000)
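One more hedged note: the model above was asked to synthesize at 48000 Hz, so saving with that rate avoids slowed-down playback, and if your torchaudio backend cannot encode mp3 you can fall back to wav:
torchaudio.save('test_1.wav',
                audio.unsqueeze(0),
                sample_rate=sample_rate)  # sample_rate = 48000 from the question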
| https://stackoverflow.com/questions/72380294/ |
Symmetric random tensor with high dimension numpy/pytorch | I wish to create a random symmetric tensor such that for each permutation of indices i1, i2,...,ik I will have:
a[i1][i2]...[ik] = a[pi(i1)][pi(i2)]...[pi(ik)]
Where pi is a permutation function. For example:
a[1][2][3][4] = a[1][3][2][4] = a[4][2][1][3] = ...
In the 2-dim setting it's the normal symmetry, but I am unsure how to implement this (without iterating over permutations) or whether there is an automatic way of doing this using numpy / pytorch.
 | AFAIK there is no function for that in either torch or numpy, but one might add all dimension-permuted versions of the tensor to make it symmetric across every dimension.
This has:
O(size^dims) - memory complexity (as generators are used)
O(dims! * size^dims) - runtime complexity (although size^dims is vectorized and should be reasonably quick)
No idea how to improve runtime complexity though, sorry. Here is the code:
import functools
import itertools
import operator
import torch
def symmetricND(size: int, dims: int) -> torch.Tensor:
data = torch.randn(*[size] * dims)
return functools.reduce(
operator.add,
(
torch.permute(data, permutation)
for permutation in itertools.permutations(range(dims))
),
)
And example usage (4x4x4 symmetrical tensor):
symmetric = symmetricND(4, 3)
print(symmetric[0][1][2])
print(symmetric[2][1][0])
print(symmetric[1][2][0])
print(symmetric[1][0][2])
In general: it will run as long as you can hold a single tensor in your memory, but it might take a lot of time ((3, 7) is instant on my laptop, while (3, 9) takes around 1 minute; not sure if this is satisfactory for your case).
You could also precompute these tensors (or even use multiple machines for that), save them and load as needed.
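A quick sanity check (just a sketch) that the result really is invariant under every axis permutation:
import itertools
import torch

def is_symmetric(t: torch.Tensor) -> bool:
    return all(
        torch.allclose(t, t.permute(*p))
        for p in itertools.permutations(range(t.dim()))
    )

print(is_symmetric(symmetricND(4, 3)))  # True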
| https://stackoverflow.com/questions/72380459/ |
Pytorch expand class label tensor | I am working on a semantic segmentation project in pytorch and I have class maps in the following shapes: [H,W] where each element is an integer between 0-n where n is the number of classes, H the height of the image and W the width of the image.
Here is an example:
test_label = torch.zeros([10,10])
test_label[:5,:5] = 1
test_label[5:,:5] = 2
test_label[:5,5:] = 3
test_label
Output:
tensor([[1., 1., 1., 1., 1., 3., 3., 3., 3., 3.],
[1., 1., 1., 1., 1., 3., 3., 3., 3., 3.],
[1., 1., 1., 1., 1., 3., 3., 3., 3., 3.],
[1., 1., 1., 1., 1., 3., 3., 3., 3., 3.],
[1., 1., 1., 1., 1., 3., 3., 3., 3., 3.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.]])
Now, what I want is something of the shape [n,H,W] where [1,H,W] would be e.g.:
tensor([[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
And [2,H,W] would be:
tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2., 0., 0., 0., 0., 0.]])
Is there a pytorch function which does this? My current approach would be iteratively masking over each unique element in the original tensor and insert them into a tensor of the shape [n,H,W] initially filled with all zeros. But that doesn't seem to be the best way to do it. I tried to look it up but it seems like I am not able to find the right name for this operation.
Thank you very much for your time.
 | You could apply nn.functional.one_hot to convert the dense format into a one-hot encoding and then multiply by the label values to get the desired result:
>>> C = int(x.max()) + 1
>>> ohe = F.one_hot(x.long(), num_classes=C)
Then multiply by the label values:
>>> res = ohe*torch.arange(C)
>>> res.permute(2,0,1)
tensor([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 3, 3, 3, 3, 3],
[0, 0, 0, 0, 0, 3, 3, 3, 3, 3],
[0, 0, 0, 0, 0, 3, 3, 3, 3, 3],
[0, 0, 0, 0, 0, 3, 3, 3, 3, 3],
[0, 0, 0, 0, 0, 3, 3, 3, 3, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
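Putting the two steps into a small helper (a sketch; x is the [H, W] integer label map from above):
import torch
import torch.nn.functional as F

def expand_labels(x: torch.Tensor) -> torch.Tensor:
    C = int(x.max()) + 1
    ohe = F.one_hot(x.long(), num_classes=C)           # [H, W, C]
    return (ohe * torch.arange(C)).permute(2, 0, 1)    # [C, H, W]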
| https://stackoverflow.com/questions/72380562/ |
how to save all the generated image in a folder in pytorch | I am trying to use data augmentation with pytorch. I want to save all the generated images in a folder (target_dir) with different numbering based on the batch index.
Here is my code. I am using epoch=100 and batch_size=128.
import os
for batch_idx in range(BATCH_SIZE):
torchvision.utils.save_image(img_grid_fake, f"C:/UserspythonProjectgenerated_image/Fake_image%{batch_idx}d.png", global_step=step)
but I am only getting the last 128 generated images; the previously generated images get overwritten when the next epoch runs.
 | You need to save the images with f"Fake_image-{epoch}-{batch_idx}.png" so that both epoch and batch_idx are used in naming the files.
import os
import torch
from torch import nn
import torchvision
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
target_dir = r"C:/Users/PycharmProjects/pythonProject/generated/generated_image/"
EPOCHS = 10
BATCH_SIZE = 64
GRID_SIZE = 9 # 9 images in each grid
NUM_ROWS = 3 # sqrt(GRID_SIZE)
# if you want all the images in a batch to make the image-grid,
# set GRID_SIZE = BATCH_SIZE
train_dataset = YourFakeImageDataset()  # apply ToTensor() inside the dataset's __getitem__
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
for epoch in range(EPOCHS):
for batch_idx, (X, y) in enumerate(train_dataloader):
# assume X is the fake-image returned by the dataloader
# and y is some target value for the X, also returned by the dataloader
# ... do something with your images here
# B, C, H, W = X.shape
img_grid_fake = torchvision.utils.make_grid(X[:GRID_SIZE, ...], nrow=NUM_ROWS)
filepath = os.path.join(target_dir, f"Fake_image-{epoch}-{batch_idx}.png")
torchvision.utils.save_image(img_grid_fake, filepath)
NOTE: I cannot answer you properly, as your question does not specify a lot of details clearly (some of them are asked by others in the comments).
If you are making a fake-image-grid, how are you doing that? With torchvision.utils.make_grid()?
References
torchvision.utils.make_grid()
Visualizing a grid of images
| https://stackoverflow.com/questions/72382933/ |
RuntimeError: shape '[32, 3, 224, 224]' is invalid for input of size 50176 | Firstly, I have trained a model on 224,224,3 images and now I am working on visualization taken from MNIST dataset codebase. Below code is worked fine on grayscale images but when i used for color images it didn't not work out.
Code Works fine
with torch.no_grad():
while True:
image = cv2.imread("example.png", flags=cv2.IMREAD_GRAYSCALE)
print(image.shape)
input_img_h, input_img_w = image.shape
image = scale_transformation(image, scale_factor=scale_factors[scale_idx_factor])
image = rotation_transformation(image, angle=rotation_factors[rotation_idx_factor])
scale_idx_factor = (scale_idx_factor + 1) % len(scale_factors)
rotation_idx_factor = (rotation_idx_factor + 1) % len(rotation_factors)
image_tensor = torch.from_numpy(image) / 255.
print("image_tensor.shape:", image_tensor.shape)
image_tensor = image_tensor.view(1, 1, input_img_h, input_img_w)
image_tensor = T.Normalize((0.1307,), (0.3081,))(image_tensor)
image_tensor = image_tensor.to(device)
out = model(image_tensor)
image = np.repeat(image[..., np.newaxis], 3, axis=-1)
roi_y, roi_x = input_img_h // 2, input_img_w // 2
plot_offsets(image, save_output, roi_x=roi_x, roi_y=roi_y)
save_output.clear()
image = cv2.resize(image, dsize=(224, 224))
cv2.imshow("image", image)
key = cv2.waitKey(30)
if key == 27:
break
Code with problem: I have changed image size only
with torch.no_grad():
while True:
image = cv2.imread("image_06764.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
print('Original Dimensions : ', image.shape)
width = 224
height = 224
dim = (width, height)
image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
# print(resized.shape[0])
input_img_h = image.shape[0]
input_img_w = image.shape[1]
image = scale_transformation(image, scale_factor=scale_factors[scale_idx_factor])
print("dfdf", image.shape)
image = rotation_transformation(image, angle=rotation_factors[rotation_idx_factor])
scale_idx_factor = (scale_idx_factor + 1) % len(scale_factors)
rotation_idx_factor = (rotation_idx_factor + 1) % len(rotation_factors)
image_tensor = torch.from_numpy(image) / 255.
print("ggggggggggg", image_tensor.size())
image_tensor = image_tensor.view(32, 3, input_img_h, input_img_w)
print("image_tensor.shape:", image_tensor.shape)
image_tensor = T.Normalize((0.1307,), (0.3081,))(image_tensor)
image_tensor = image_tensor.to(device)
out = model(image_tensor)
image = np.repeat(image[..., np.newaxis], 3, axis=-1)
roi_y, roi_x = input_img_h // 2, input_img_w // 2
plot_offsets(image, save_output, roi_x=roi_x, roi_y=roi_y)
save_output.clear()
image = cv2.resize(image, dsize=(224, 224))
cv2.imshow("image", image)
key = cv2.waitKey(30)
if key == 27:
break
Traceback
Traceback (most recent call last):
File "/media/cvpr/CM_1/tutorials/Deformable_Convolutionv_V2/offset_visualization.py", line 184, in <module>
image_tensor = image_tensor.view(32, 3, input_img_h, input_img_w)
RuntimeError: shape '[32, 3, 224, 224]' is invalid for input of size 50176
 | image_tensor is a tensor of size 50176, which can be viewed as 224x224. However, you're trying to view it as 32x3x224x224.
Try this:
image_tensor = image_tensor.view(1, 1, input_img_h, input_img_w).repeat(1, 3, 1, 1)
The above code will copy the grayscale image 3 times channel-wise, resulting in a tensor of size 1x3x224x224.
Additionally, why are you converting the color image to a grayscale image with image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)? There will be no channel problem if you remove it.
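A sketch of that colour path (the normalization stats are just placeholders; also note OpenCV loads BGR, so a BGR-to-RGB conversion may be needed depending on how the model was trained):
image_tensor = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).float() / 255.  # HxWx3 -> 1x3xHxW
image_tensor = T.Normalize((0.1307, 0.1307, 0.1307), (0.3081, 0.3081, 0.3081))(image_tensor)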
Any advice or correction to this answer is welcome.
| https://stackoverflow.com/questions/72387456/ |
Pytorch matrix multiplication | I'm struggling with dimensions and matrix multiplication in pytorch.
I want to multiply matrix A
tensor([[[104.7500, 111.3750, 138.2500, 144.8750],
[104.2500, 110.8750, 137.7500, 144.3750]],
[[356.8750, 363.5000, 390.3750, 397.0000],
[356.3750, 363.0000, 389.8750, 396.5000]]])
with matrix B
tensor([[[[ 0., 1., 2., 5., 6., 7., 10., 11., 12.],
[ 2., 3., 4., 7., 8., 9., 12., 13., 14.],
[ 10., 11., 12., 15., 16., 17., 20., 21., 22.],
[ 12., 13., 14., 17., 18., 19., 22., 23., 24.]],
[[ 25., 26., 27., 30., 31., 32., 35., 36., 37.],
[ 27., 28., 29., 32., 33., 34., 37., 38., 39.],
[ 35., 36., 37., 40., 41., 42., 45., 46., 47.],
[ 37., 38., 39., 42., 43., 44., 47., 48., 49.]],
[[ 50., 51., 52., 55., 56., 57., 60., 61., 62.],
[ 52., 53., 54., 57., 58., 59., 62., 63., 64.],
[ 60., 61., 62., 65., 66., 67., 70., 71., 72.],
[ 62., 63., 64., 67., 68., 69., 72., 73., 74.]]],
[[[ 75., 76., 77., 80., 81., 82., 85., 86., 87.],
[ 77., 78., 79., 82., 83., 84., 87., 88., 89.],
[ 85., 86., 87., 90., 91., 92., 95., 96., 97.],
[ 87., 88., 89., 92., 93., 94., 97., 98., 99.]],
[[100., 101., 102., 105., 106., 107., 110., 111., 112.],
[102., 103., 104., 107., 108., 109., 112., 113., 114.],
[110., 111., 112., 115., 116., 117., 120., 121., 122.],
[112., 113., 114., 117., 118., 119., 122., 123., 124.]],
[[125., 126., 127., 130., 131., 132., 135., 136., 137.],
[127., 128., 129., 132., 133., 134., 137., 138., 139.],
[135., 136., 137., 140., 141., 142., 145., 146., 147.],
[137., 138., 139., 142., 143., 144., 147., 148., 149.]]]])
However, simply using @ to multiply them doesn't lead me to the desired result.
What I want is something like: multiply the first two rows of A by the first 3 4x9 submatrices of B (let's say B[:,:,0,:]) so that I have two results, then in the same way multiply the third and fourth rows of A with the second 3 4x9 submatrices of B, so as to again have two results; then I want to sum the first results of each multiplication and the second results of each.
I know I have to work with some kind of reshapes but I find it so confusing, can you help me with a quite generalizable solution?
| This example would be helpful:
a = torch.ones((4, 4)).long()
a = a.reshape(2, 2, 4)
b = torch.tensor(list(range(36*6)))
b = b.reshape(2, 3, 4, 9)
t1 = a[0] @ b[0, :]
t2 = a[1] @ b[1, :]
result = t1 + t2
accum = torch.zeros((b.shape[1], a.shape[1], b.shape[3]))
for i in range(a.shape[0]):
accum = accum + (a[i] @ b[i, :])
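The same summed products can also be written in a single vectorized call with einsum (a sketch; the cast to float just avoids dtype restrictions on matmul-like ops):
result_einsum = torch.einsum('ijk,imkl->mjl', a.float(), b.float())  # same values as accum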
| https://stackoverflow.com/questions/72388627/ |
How can I overcome PyTorch Tensor plotting problem? | I am a new PyTorch user and here is the code I am playing with.
epochs=20 # train for this number of epochs
losses=[] #to keep track on losses
for i in range(epochs):
i+=1 #counter
y_pred=model(cat_train,con_train)
loss=torch.sqrt(criterion(y_pred,y_train))
losses.append(loss) # append loss values
if i%10==1: # print out our progress
print(f'epoch: {i} loss is {loss}')
# back propagation
optimizer.zero_grad() # find the zero gradient
loss.backward() #move backward
optimizer.step()
plt.plot(range(epochs),losses)
and it gives me the following error:
RuntimeError: Can't call numpy() on Tensor that requires grad. Use tensor.detach().numpy() instead.
I know the problem is related to the type of the losses with the following kind of rows:
tensor(3.6168, grad_fn=<SqrtBackward0>)
Can you suggest how I can grab the first column (the numeric values of this tensor) and make it plottable, i.e. an array, not a Tensor?
| You can use torch.Tensor.item.
So, replace the statement
losses.append(loss)
with
losses.append(loss.item())
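If you have already collected raw loss tensors, you can also convert them at plot time instead (a sketch):
plt.plot(range(epochs), torch.stack(losses).detach().cpu().numpy())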
| https://stackoverflow.com/questions/72397590/ |
The problem of modified architecture of Resnet-101 | Recently, I am implementing the ME-Net using pytorch, however, I encounter some problem.
In the ME-Net paper (as attached figure [1]: https://i.stack.imgur.com/1uKRc.png), the top part using ResNet-101 to extract features(using the pretrained ResNet-101 weight on the ImageNet datasets as the initial weight), whose kernel is arithmetic mean.
I use (256,256,11) as the image input, but i am not sure stage0~stage1 belongs to which part in ResNet-101 and can't find the output feature map size in each stage corresponding to the paper.
ME-Net paper:
ME-Net: A Deep Convolutional Neural Network for Extracting Mangrove Using Sentinel-2A Data
https://www.mdpi.com/2072-4292/13/7/1
from torchvision import models
res101 = models.resnet101(pretrained=True)
res101.conv1=nn.Conv2d(11, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
res101.cuda()
summary(res101, (11,256,256))
The summary of res101 :
Layer (type) Output Shape Param #
Conv2d-1 [-1, 64, 128, 128] 34,496
BatchNorm2d-2 [-1, 64, 128, 128] 128
ReLU-3 [-1, 64, 128, 128] 0
MaxPool2d-4 [-1, 64, 64, 64] 0
Conv2d-5 [-1, 64, 64, 64] 4,096
BatchNorm2d-6 [-1, 64, 64, 64] 128
ReLU-7 [-1, 64, 64, 64] 0
Conv2d-8 [-1, 64, 64, 64] 36,864
BatchNorm2d-9 [-1, 64, 64, 64] 128
ReLU-10 [-1, 64, 64, 64] 0
Conv2d-11 [-1, 256, 64, 64] 16,384
BatchNorm2d-12 [-1, 256, 64, 64] 512
Conv2d-13 [-1, 256, 64, 64] 16,384
BatchNorm2d-14 [-1, 256, 64, 64] 512
ReLU-15 [-1, 256, 64, 64] 0
Bottleneck-16 [-1, 256, 64, 64] 0
Conv2d-17 [-1, 64, 64, 64] 16,384
BatchNorm2d-18 [-1, 64, 64, 64] 128
ReLU-19 [-1, 64, 64, 64] 0
Conv2d-20 [-1, 64, 64, 64] 36,864
BatchNorm2d-21 [-1, 64, 64, 64] 128
ReLU-22 [-1, 64, 64, 64] 0
Conv2d-23 [-1, 256, 64, 64] 16,384
BatchNorm2d-24 [-1, 256, 64, 64] 512
ReLU-25 [-1, 256, 64, 64] 0
Bottleneck-26 [-1, 256, 64, 64] 0
Conv2d-27 [-1, 64, 64, 64] 16,384
BatchNorm2d-28 [-1, 64, 64, 64] 128
ReLU-29 [-1, 64, 64, 64] 0
Conv2d-30 [-1, 64, 64, 64] 36,864
BatchNorm2d-31 [-1, 64, 64, 64] 128
ReLU-32 [-1, 64, 64, 64] 0
Conv2d-33 [-1, 256, 64, 64] 16,384
BatchNorm2d-34 [-1, 256, 64, 64] 512
ReLU-35 [-1, 256, 64, 64] 0
Bottleneck-36 [-1, 256, 64, 64] 0
Conv2d-37 [-1, 128, 64, 64] 32,768
BatchNorm2d-38 [-1, 128, 64, 64] 256
ReLU-39 [-1, 128, 64, 64] 0
Conv2d-40 [-1, 128, 32, 32] 147,456
BatchNorm2d-41 [-1, 128, 32, 32] 256
ReLU-42 [-1, 128, 32, 32] 0
Conv2d-43 [-1, 512, 32, 32] 65,536
BatchNorm2d-44 [-1, 512, 32, 32] 1,024
Conv2d-45 [-1, 512, 32, 32] 131,072
BatchNorm2d-46 [-1, 512, 32, 32] 1,024
ReLU-47 [-1, 512, 32, 32] 0
Bottleneck-48 [-1, 512, 32, 32] 0
Conv2d-49 [-1, 128, 32, 32] 65,536
BatchNorm2d-50 [-1, 128, 32, 32] 256
ReLU-51 [-1, 128, 32, 32] 0
Conv2d-52 [-1, 128, 32, 32] 147,456
BatchNorm2d-53 [-1, 128, 32, 32] 256
ReLU-54 [-1, 128, 32, 32] 0
Conv2d-55 [-1, 512, 32, 32] 65,536
BatchNorm2d-56 [-1, 512, 32, 32] 1,024
ReLU-57 [-1, 512, 32, 32] 0
Bottleneck-58 [-1, 512, 32, 32] 0
Conv2d-59 [-1, 128, 32, 32] 65,536
BatchNorm2d-60 [-1, 128, 32, 32] 256
ReLU-61 [-1, 128, 32, 32] 0
Conv2d-62 [-1, 128, 32, 32] 147,456
BatchNorm2d-63 [-1, 128, 32, 32] 256
ReLU-64 [-1, 128, 32, 32] 0
Conv2d-65 [-1, 512, 32, 32] 65,536
BatchNorm2d-66 [-1, 512, 32, 32] 1,024
ReLU-67 [-1, 512, 32, 32] 0
Bottleneck-68 [-1, 512, 32, 32] 0
Conv2d-69 [-1, 128, 32, 32] 65,536
BatchNorm2d-70 [-1, 128, 32, 32] 256
ReLU-71 [-1, 128, 32, 32] 0
Conv2d-72 [-1, 128, 32, 32] 147,456
BatchNorm2d-73 [-1, 128, 32, 32] 256
ReLU-74 [-1, 128, 32, 32] 0
Conv2d-75 [-1, 512, 32, 32] 65,536
BatchNorm2d-76 [-1, 512, 32, 32] 1,024
ReLU-77 [-1, 512, 32, 32] 0
Bottleneck-78 [-1, 512, 32, 32] 0
Conv2d-79 [-1, 256, 32, 32] 131,072
BatchNorm2d-80 [-1, 256, 32, 32] 512
ReLU-81 [-1, 256, 32, 32] 0
Conv2d-82 [-1, 256, 16, 16] 589,824
BatchNorm2d-83 [-1, 256, 16, 16] 512
ReLU-84 [-1, 256, 16, 16] 0
Conv2d-85 [-1, 1024, 16, 16] 262,144
BatchNorm2d-86 [-1, 1024, 16, 16] 2,048
Conv2d-87 [-1, 1024, 16, 16] 524,288
BatchNorm2d-88 [-1, 1024, 16, 16] 2,048
ReLU-89 [-1, 1024, 16, 16] 0
Bottleneck-90 [-1, 1024, 16, 16] 0
Conv2d-91 [-1, 256, 16, 16] 262,144
BatchNorm2d-92 [-1, 256, 16, 16] 512
ReLU-93 [-1, 256, 16, 16] 0
Conv2d-94 [-1, 256, 16, 16] 589,824
BatchNorm2d-95 [-1, 256, 16, 16] 512
ReLU-96 [-1, 256, 16, 16] 0
Conv2d-97 [-1, 1024, 16, 16] 262,144
BatchNorm2d-98 [-1, 1024, 16, 16] 2,048
ReLU-99 [-1, 1024, 16, 16] 0
Bottleneck-100 [-1, 1024, 16, 16] 0
Conv2d-101 [-1, 256, 16, 16] 262,144
BatchNorm2d-102 [-1, 256, 16, 16] 512
ReLU-103 [-1, 256, 16, 16] 0
Conv2d-104 [-1, 256, 16, 16] 589,824
BatchNorm2d-105 [-1, 256, 16, 16] 512
ReLU-106 [-1, 256, 16, 16] 0
Conv2d-107 [-1, 1024, 16, 16] 262,144
BatchNorm2d-108 [-1, 1024, 16, 16] 2,048
ReLU-109 [-1, 1024, 16, 16] 0
Bottleneck-110 [-1, 1024, 16, 16] 0
Conv2d-111 [-1, 256, 16, 16] 262,144
BatchNorm2d-112 [-1, 256, 16, 16] 512
ReLU-113 [-1, 256, 16, 16] 0
Conv2d-114 [-1, 256, 16, 16] 589,824
BatchNorm2d-115 [-1, 256, 16, 16] 512
ReLU-116 [-1, 256, 16, 16] 0
Conv2d-117 [-1, 1024, 16, 16] 262,144
BatchNorm2d-118 [-1, 1024, 16, 16] 2,048
ReLU-119 [-1, 1024, 16, 16] 0
Bottleneck-120 [-1, 1024, 16, 16] 0
Conv2d-121 [-1, 256, 16, 16] 262,144
BatchNorm2d-122 [-1, 256, 16, 16] 512
ReLU-123 [-1, 256, 16, 16] 0
Conv2d-124 [-1, 256, 16, 16] 589,824
BatchNorm2d-125 [-1, 256, 16, 16] 512
ReLU-126 [-1, 256, 16, 16] 0
Conv2d-127 [-1, 1024, 16, 16] 262,144
BatchNorm2d-128 [-1, 1024, 16, 16] 2,048
ReLU-129 [-1, 1024, 16, 16] 0
Bottleneck-130 [-1, 1024, 16, 16] 0
Conv2d-131 [-1, 256, 16, 16] 262,144
BatchNorm2d-132 [-1, 256, 16, 16] 512
ReLU-133 [-1, 256, 16, 16] 0
Conv2d-134 [-1, 256, 16, 16] 589,824
BatchNorm2d-135 [-1, 256, 16, 16] 512
ReLU-136 [-1, 256, 16, 16] 0
Conv2d-137 [-1, 1024, 16, 16] 262,144
BatchNorm2d-138 [-1, 1024, 16, 16] 2,048
ReLU-139 [-1, 1024, 16, 16] 0
Bottleneck-140 [-1, 1024, 16, 16] 0
Conv2d-141 [-1, 256, 16, 16] 262,144
BatchNorm2d-142 [-1, 256, 16, 16] 512
ReLU-143 [-1, 256, 16, 16] 0
Conv2d-144 [-1, 256, 16, 16] 589,824
BatchNorm2d-145 [-1, 256, 16, 16] 512
ReLU-146 [-1, 256, 16, 16] 0
Conv2d-147 [-1, 1024, 16, 16] 262,144
BatchNorm2d-148 [-1, 1024, 16, 16] 2,048
ReLU-149 [-1, 1024, 16, 16] 0
Bottleneck-150 [-1, 1024, 16, 16] 0
Conv2d-151 [-1, 256, 16, 16] 262,144
BatchNorm2d-152 [-1, 256, 16, 16] 512
ReLU-153 [-1, 256, 16, 16] 0
Conv2d-154 [-1, 256, 16, 16] 589,824
BatchNorm2d-155 [-1, 256, 16, 16] 512
ReLU-156 [-1, 256, 16, 16] 0
Conv2d-157 [-1, 1024, 16, 16] 262,144
BatchNorm2d-158 [-1, 1024, 16, 16] 2,048
ReLU-159 [-1, 1024, 16, 16] 0
Bottleneck-160 [-1, 1024, 16, 16] 0
Conv2d-161 [-1, 256, 16, 16] 262,144
BatchNorm2d-162 [-1, 256, 16, 16] 512
ReLU-163 [-1, 256, 16, 16] 0
Conv2d-164 [-1, 256, 16, 16] 589,824
BatchNorm2d-165 [-1, 256, 16, 16] 512
ReLU-166 [-1, 256, 16, 16] 0
Conv2d-167 [-1, 1024, 16, 16] 262,144
BatchNorm2d-168 [-1, 1024, 16, 16] 2,048
ReLU-169 [-1, 1024, 16, 16] 0
Bottleneck-170 [-1, 1024, 16, 16] 0
Conv2d-171 [-1, 256, 16, 16] 262,144
BatchNorm2d-172 [-1, 256, 16, 16] 512
ReLU-173 [-1, 256, 16, 16] 0
Conv2d-174 [-1, 256, 16, 16] 589,824
BatchNorm2d-175 [-1, 256, 16, 16] 512
ReLU-176 [-1, 256, 16, 16] 0
Conv2d-177 [-1, 1024, 16, 16] 262,144
BatchNorm2d-178 [-1, 1024, 16, 16] 2,048
ReLU-179 [-1, 1024, 16, 16] 0
Bottleneck-180 [-1, 1024, 16, 16] 0
Conv2d-181 [-1, 256, 16, 16] 262,144
BatchNorm2d-182 [-1, 256, 16, 16] 512
ReLU-183 [-1, 256, 16, 16] 0
Conv2d-184 [-1, 256, 16, 16] 589,824
BatchNorm2d-185 [-1, 256, 16, 16] 512
ReLU-186 [-1, 256, 16, 16] 0
Conv2d-187 [-1, 1024, 16, 16] 262,144
BatchNorm2d-188 [-1, 1024, 16, 16] 2,048
ReLU-189 [-1, 1024, 16, 16] 0
Bottleneck-190 [-1, 1024, 16, 16] 0
Conv2d-191 [-1, 256, 16, 16] 262,144
BatchNorm2d-192 [-1, 256, 16, 16] 512
ReLU-193 [-1, 256, 16, 16] 0
Conv2d-194 [-1, 256, 16, 16] 589,824
BatchNorm2d-195 [-1, 256, 16, 16] 512
ReLU-196 [-1, 256, 16, 16] 0
Conv2d-197 [-1, 1024, 16, 16] 262,144
BatchNorm2d-198 [-1, 1024, 16, 16] 2,048
ReLU-199 [-1, 1024, 16, 16] 0
Bottleneck-200 [-1, 1024, 16, 16] 0
Conv2d-201 [-1, 256, 16, 16] 262,144
BatchNorm2d-202 [-1, 256, 16, 16] 512
ReLU-203 [-1, 256, 16, 16] 0
Conv2d-204 [-1, 256, 16, 16] 589,824
BatchNorm2d-205 [-1, 256, 16, 16] 512
ReLU-206 [-1, 256, 16, 16] 0
Conv2d-207 [-1, 1024, 16, 16] 262,144
BatchNorm2d-208 [-1, 1024, 16, 16] 2,048
ReLU-209 [-1, 1024, 16, 16] 0
Bottleneck-210 [-1, 1024, 16, 16] 0
Conv2d-211 [-1, 256, 16, 16] 262,144
BatchNorm2d-212 [-1, 256, 16, 16] 512
ReLU-213 [-1, 256, 16, 16] 0
Conv2d-214 [-1, 256, 16, 16] 589,824
BatchNorm2d-215 [-1, 256, 16, 16] 512
ReLU-216 [-1, 256, 16, 16] 0
Conv2d-217 [-1, 1024, 16, 16] 262,144
BatchNorm2d-218 [-1, 1024, 16, 16] 2,048
ReLU-219 [-1, 1024, 16, 16] 0
Bottleneck-220 [-1, 1024, 16, 16] 0
Conv2d-221 [-1, 256, 16, 16] 262,144
BatchNorm2d-222 [-1, 256, 16, 16] 512
ReLU-223 [-1, 256, 16, 16] 0
Conv2d-224 [-1, 256, 16, 16] 589,824
BatchNorm2d-225 [-1, 256, 16, 16] 512
ReLU-226 [-1, 256, 16, 16] 0
Conv2d-227 [-1, 1024, 16, 16] 262,144
BatchNorm2d-228 [-1, 1024, 16, 16] 2,048
ReLU-229 [-1, 1024, 16, 16] 0
Bottleneck-230 [-1, 1024, 16, 16] 0
Conv2d-231 [-1, 256, 16, 16] 262,144
BatchNorm2d-232 [-1, 256, 16, 16] 512
ReLU-233 [-1, 256, 16, 16] 0
Conv2d-234 [-1, 256, 16, 16] 589,824
BatchNorm2d-235 [-1, 256, 16, 16] 512
ReLU-236 [-1, 256, 16, 16] 0
Conv2d-237 [-1, 1024, 16, 16] 262,144
BatchNorm2d-238 [-1, 1024, 16, 16] 2,048
ReLU-239 [-1, 1024, 16, 16] 0
Bottleneck-240 [-1, 1024, 16, 16] 0
Conv2d-241 [-1, 256, 16, 16] 262,144
BatchNorm2d-242 [-1, 256, 16, 16] 512
ReLU-243 [-1, 256, 16, 16] 0
Conv2d-244 [-1, 256, 16, 16] 589,824
BatchNorm2d-245 [-1, 256, 16, 16] 512
ReLU-246 [-1, 256, 16, 16] 0
Conv2d-247 [-1, 1024, 16, 16] 262,144
BatchNorm2d-248 [-1, 1024, 16, 16] 2,048
ReLU-249 [-1, 1024, 16, 16] 0
Bottleneck-250 [-1, 1024, 16, 16] 0
Conv2d-251 [-1, 256, 16, 16] 262,144
BatchNorm2d-252 [-1, 256, 16, 16] 512
ReLU-253 [-1, 256, 16, 16] 0
Conv2d-254 [-1, 256, 16, 16] 589,824
BatchNorm2d-255 [-1, 256, 16, 16] 512
ReLU-256 [-1, 256, 16, 16] 0
Conv2d-257 [-1, 1024, 16, 16] 262,144
BatchNorm2d-258 [-1, 1024, 16, 16] 2,048
ReLU-259 [-1, 1024, 16, 16] 0
Bottleneck-260 [-1, 1024, 16, 16] 0
Conv2d-261 [-1, 256, 16, 16] 262,144
BatchNorm2d-262 [-1, 256, 16, 16] 512
ReLU-263 [-1, 256, 16, 16] 0
Conv2d-264 [-1, 256, 16, 16] 589,824
BatchNorm2d-265 [-1, 256, 16, 16] 512
ReLU-266 [-1, 256, 16, 16] 0
Conv2d-267 [-1, 1024, 16, 16] 262,144
BatchNorm2d-268 [-1, 1024, 16, 16] 2,048
ReLU-269 [-1, 1024, 16, 16] 0
Bottleneck-270 [-1, 1024, 16, 16] 0
Conv2d-271 [-1, 256, 16, 16] 262,144
BatchNorm2d-272 [-1, 256, 16, 16] 512
ReLU-273 [-1, 256, 16, 16] 0
Conv2d-274 [-1, 256, 16, 16] 589,824
BatchNorm2d-275 [-1, 256, 16, 16] 512
ReLU-276 [-1, 256, 16, 16] 0
Conv2d-277 [-1, 1024, 16, 16] 262,144
BatchNorm2d-278 [-1, 1024, 16, 16] 2,048
ReLU-279 [-1, 1024, 16, 16] 0
Bottleneck-280 [-1, 1024, 16, 16] 0
Conv2d-281 [-1, 256, 16, 16] 262,144
BatchNorm2d-282 [-1, 256, 16, 16] 512
ReLU-283 [-1, 256, 16, 16] 0
Conv2d-284 [-1, 256, 16, 16] 589,824
BatchNorm2d-285 [-1, 256, 16, 16] 512
ReLU-286 [-1, 256, 16, 16] 0
Conv2d-287 [-1, 1024, 16, 16] 262,144
BatchNorm2d-288 [-1, 1024, 16, 16] 2,048
ReLU-289 [-1, 1024, 16, 16] 0
Bottleneck-290 [-1, 1024, 16, 16] 0
Conv2d-291 [-1, 256, 16, 16] 262,144
BatchNorm2d-292 [-1, 256, 16, 16] 512
ReLU-293 [-1, 256, 16, 16] 0
Conv2d-294 [-1, 256, 16, 16] 589,824
BatchNorm2d-295 [-1, 256, 16, 16] 512
ReLU-296 [-1, 256, 16, 16] 0
Conv2d-297 [-1, 1024, 16, 16] 262,144
BatchNorm2d-298 [-1, 1024, 16, 16] 2,048
ReLU-299 [-1, 1024, 16, 16] 0
Bottleneck-300 [-1, 1024, 16, 16] 0
Conv2d-301 [-1, 256, 16, 16] 262,144
BatchNorm2d-302 [-1, 256, 16, 16] 512
ReLU-303 [-1, 256, 16, 16] 0
Conv2d-304 [-1, 256, 16, 16] 589,824
BatchNorm2d-305 [-1, 256, 16, 16] 512
ReLU-306 [-1, 256, 16, 16] 0
Conv2d-307 [-1, 1024, 16, 16] 262,144
BatchNorm2d-308 [-1, 1024, 16, 16] 2,048
ReLU-309 [-1, 1024, 16, 16] 0
Bottleneck-310 [-1, 1024, 16, 16] 0
Conv2d-311 [-1, 512, 16, 16] 524,288
BatchNorm2d-312 [-1, 512, 16, 16] 1,024
ReLU-313 [-1, 512, 16, 16] 0
Conv2d-314 [-1, 512, 8, 8] 2,359,296
BatchNorm2d-315 [-1, 512, 8, 8] 1,024
ReLU-316 [-1, 512, 8, 8] 0
Conv2d-317 [-1, 2048, 8, 8] 1,048,576
BatchNorm2d-318 [-1, 2048, 8, 8] 4,096
Conv2d-319 [-1, 2048, 8, 8] 2,097,152
BatchNorm2d-320 [-1, 2048, 8, 8] 4,096
ReLU-321 [-1, 2048, 8, 8] 0
Bottleneck-322 [-1, 2048, 8, 8] 0
Conv2d-323 [-1, 512, 8, 8] 1,048,576
BatchNorm2d-324 [-1, 512, 8, 8] 1,024
ReLU-325 [-1, 512, 8, 8] 0
Conv2d-326 [-1, 512, 8, 8] 2,359,296
BatchNorm2d-327 [-1, 512, 8, 8] 1,024
ReLU-328 [-1, 512, 8, 8] 0
Conv2d-329 [-1, 2048, 8, 8] 1,048,576
BatchNorm2d-330 [-1, 2048, 8, 8] 4,096
ReLU-331 [-1, 2048, 8, 8] 0
Bottleneck-332 [-1, 2048, 8, 8] 0
Conv2d-333 [-1, 512, 8, 8] 1,048,576
BatchNorm2d-334 [-1, 512, 8, 8] 1,024
ReLU-335 [-1, 512, 8, 8] 0
Conv2d-336 [-1, 512, 8, 8] 2,359,296
BatchNorm2d-337 [-1, 512, 8, 8] 1,024
ReLU-338 [-1, 512, 8, 8] 0
Conv2d-339 [-1, 2048, 8, 8] 1,048,576
BatchNorm2d-340 [-1, 2048, 8, 8] 4,096
ReLU-341 [-1, 2048, 8, 8] 0
Bottleneck-342 [-1, 2048, 8, 8] 0
AdaptiveAvgPool2d-343 [-1, 2048, 1, 1] 0
Linear-344 [-1, 1000] 2,049,000
I think the output feature map in stage0 is Layer:ReLU-3; the output feature map in stage1 is Layer:ReLU-32; the output feature map in stage2 is Layer:ReLU-81; the output feature map in stage3 is Layer:ReLU-313; I can't find the output feature maps in stage4 and stage5 in the size of (8,8,1024) and (4,4,2048).
-----Update below-----
To acquire the feature map in stage0~stage5, I use the code below:
stage5 = nn.Sequential(*list(res101.children())[:-2])
stage4 = nn.Sequential(*list(res101.children())[:-3])
stage3 = nn.Sequential(*list(res101.children())[:-4])
stage2 = nn.Sequential(*list(res101.children())[:-5])
stage1 = nn.Sequential(*list(res101.children())[:-6])
stage0 = nn.Sequential(*list(res101.children())[:-7])
However, stage0~5 seem to just represent the architecture.
How can I get their feature maps?
 | It is easier to check PyTorch's source code for issues like this one, see here.
Look at the ResNet class (which is used to create different variants via factory-like functions) for clues.
In this case, respective layers would be:
conv1 (stage0)
max pooled output of conv1 (stage1); this one is assumed from the shapes
layer1 (stage2)
layer2 (stage3)
layer3 (stage4)
layer4 (stage5)
Also, you might use print(res101) to see all of the blocks for easier debugging as it has a hierarchical structure.
Obtaining features
You should use PyTorch FX for that (see here).
In your case it would be something along these lines:
from torchvision.models.feature_extraction import create_feature_extractor
extractor = create_feature_extractor(
res101,
return_nodes=[
"conv1",
"maxpool",
"layer1",
"layer2",
"layer3",
"layer4",
]
)
features = extractor(inputs)
Features should be a dict with keys being names of the layers specified above and values being resulting tensors.
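To check that each stage comes out at the size you expect, you can inspect that dict (a sketch; the random input just mirrors your data shape and should be on the same device as the model):
dummy = torch.randn(1, 11, 256, 256).to(next(res101.parameters()).device)
features = extractor(dummy)
for name, feature_map in features.items():
    print(name, tuple(feature_map.shape))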
| https://stackoverflow.com/questions/72402905/ |
PyTorch ConvNet not working. Loss goes down as accuracy stays at about 14% | I am trying to learn pytorch and this is my first convolutional network. But the model is not training: the loss goes down on every epoch, but the accuracy fluctuates between 10-20%. I want to know what I am doing wrong so I can improve.
This is the data loading part
training_data = datasets.MNIST(
root="data",
train=True,
download=True,
transform=transforms.ToTensor(),
target_transform=transforms.Lambda(lambda y: torch.zeros(10,dtype=torch.float).scatter_(0,torch.tensor(y),value=1))
)
test_data = datasets.MNIST(
root="data",
train=False,
download=True,
transform=transforms.ToTensor(),
target_transform=transforms.Lambda(lambda y: torch.zeros(10,dtype=torch.float).scatter_(0,torch.tensor(y),value=1))
)
train_dataloader = DataLoader(training_data,batch_size=64,shuffle=True)
test_dataloader = DataLoader(test_data,batch_size=64,shuffle=True)
This is my model
from torch.nn.modules.pooling import MaxPool2d
class CNN(nn.Module):
def __init__(self):
super(CNN,self).__init__()
self.CNN_stack = nn.Sequential(
nn.ReflectionPad2d((1,0,1,0)),
nn.Conv2d(in_channels=1,out_channels=5,kernel_size=5,stride=2),
nn.ReLU(),
nn.Conv2d(in_channels=5,out_channels=50,kernel_size=5,stride=2),
nn.ReLU(),
nn.Flatten(),
nn.Linear(1250,100),
nn.ReLU(),
nn.Linear(100,10)
)
def forward(self,x):
logits = self.CNN_stack(x)
return logits
model = CNN().to(device)
These are my propagation loops
def train_loop(batch,X,y,model,loss_fn,optimizer):
size = 60000
#Forward Prop
pred = model(X)
loss = loss_fn(pred,y)
#Backward Prop
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
size = len(dataloader.dataset)
num_batches = len(dataloader)
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
X,y=X.to(device),y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(0) == y).type(torch.float).sum().item()
#print(f"{pred[0].argmax(0)}={y[0]}")
test_loss /= num_batches
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(),lr=learning_rate)
epochs = 10
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
for batch, (X,y) in enumerate(train_dataloader):
X,y = X.to(device), y.to(device)
train_loop(batch,X, y , model, loss_fn, optimizer)
test_loop(test_dataloader , model, loss_fn)
print("Done!")
 | Your accuracy calculation is not correct:
on the pred side use argmax(1);
on the y side, note that y is one-hot encoded, so use argmax there too (or something equivalent).
This will work:
correct += (pred.argmax(1) == y.argmax(1)).sum().item()
Also use a higher learning rate, like 0.01, to see faster learning.
With these changes your net yields Accuracy==97.6% after 10 epochs.
| https://stackoverflow.com/questions/72405377/ |
Boolean value of Tensor with more than one value is ambiguous | I have this class of NN:
class Block(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(Block, self).__init__()
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
padding=1, groups=in_planes, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = nn.ReLU(out)
return out
I create a model and pass it the random input, and it shows the error:
model = Block(3,3, 1)
x = torch.rand(64, 3, 100, 100)
model(x)
I received this error:
RuntimeError: Boolean value of Tensor with more than one value is ambiguous
 | The issue is with nn.ReLU(out) in forward(): nn.ReLU is a module class, so this constructs a new module (with the tensor passed as the inplace argument) instead of applying ReLU to the tensor, and the ambiguous-boolean error shows up as soon as that module is printed or displayed (e.g. in a notebook). Instantiate the module once in __init__ and call it in forward():
class Block(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(Block, self).__init__()
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
padding=1, groups=in_planes, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
return out
| https://stackoverflow.com/questions/72407092/ |
forward() using Pytorch Lightning not giving consistent binary classification results for single VS multiple images |
I have trained a Variational Autoencoder (VAE) with an additional fully connected layer after the encoder for binary image classification. It is setup using PyTorch Lightning. The encoder / decoder is resnet18 from PyTorch Lightning Bolts repo.
from pl_bolts.models.autoencoders.components import (
resnet18_encoder,
resnet18_decoder
)
class VariationalAutoencoder(LightningModule):
...
self.first_conv: bool = False
self.maxpool1: bool = False
self.enc_out_dim: int = 512
self.encoder = resnet18_encoder(first_conv, maxpool1)
self.fc_object_identity = nn.Linear(self.enc_out_dim, 1)
def forward(self, x):
x_encoded = self.encoder(x)
mu = self.fc_mu(x_encoded)
log_var = self.fc_var(x_encoded)
p, q, z = self.sample(mu, log_var)
x_classification_score = torch.sigmoid(self.fc_object_identity(x_encoded))
return self.decoder(z), x_classification_score
variational_autoencoder = VariationalAutoencoder.load_from_checkpoint(
checkpoint_path=str(checkpoint_file_path)
)
with torch.no_grad():
predicted_images, classification_score = variational_autoencoder(test_images)
The reconstructions work well for single images and multiple images when passed through forward(). However, when I pass multiple images to forward() I get different results for the classification score than if I pass a single image tensor:
# Image 1 (class=1) [1, 3, 64, 64]
x_classification_score = 0.9857
# Image 2 (class=0) [1, 3, 64, 64]
x_classification_score = 0.0175
# Image 1 and 2 [2, 3, 64, 64]
x_classification_score =[[0.8943],
[0.1736]]
Why is this happening?
| You are using resnet18 which has a torch.nn.BatchNorm2d layer.
Its behavior changes depending on whether it is in train or eval mode. During training it calculates the mean and variance across the batch, and hence its output depends on the other examples in the batch.
In evaluation mode, the mean and variance gathered during training via a moving average are used instead, which is batch-independent; hence the results are the same.
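A sketch with the names from the question: switching the whole module to eval mode before scoring makes single-image and batched results consistent.
variational_autoencoder.eval()
with torch.no_grad():
    predicted_images, classification_score = variational_autoencoder(test_images)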
| https://stackoverflow.com/questions/72408636/ |
PyTorch: why does running output = model(images) use so much GPU memory? | In trying to understand why my maximum batch size is limited for my PyTorch model, I noticed that it's not the model itself nor loading the tensors onto the GPU that uses the most memory. Most memory is used up when generating a prediction for the first time, e.g. with the following line in the training loop:
output = model(images)
where images is some input tensor, and model is my PyTorch model. Before running the line, I have something like 9GB of GPU memory available, and afterwards I'm down to 2.5GB (it then further drops to 1GB available after running loss = criterion(outputs, labels)).
Two questions:
Is this normal?
Why is it happening? What is all that memory being used for? From what I understand the model is already loaded in, and the actual input tensors are already on the GPU before making that call. The output tensors themselves can't be that big. Does it have something to do with storing the computational graph?
| This is normal: The key here is that all intermediate tensors (the whole computation graph) have to be stored if you want to compute the gradient via backward-mode differentiation.
You can avoid that by using the .no_grad context manager:
with torch.no_grad():
output = model(images)
You will observe that a lot less memory is used, because no computation graph will be stored. But this also means that you can't compute the derivatives anymore. It is however the standard way if you just want to evaluate the model without the need of any optimization.
There is one way to reduce the memory consumption if you still want to optimize, and it is called checkpointing. Whenever you need an intermediate tensor in the backward pass, it will be computed again from the input (or actually from the last "checkpoint"), without storing the intermediate tensors up to that point. But this is computationally more expensive: you're trading memory against computational time.
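A minimal sketch of that trade-off with torch.utils.checkpoint (block1 and block2 are hypothetical sub-modules inside your own model's forward):
from torch.utils.checkpoint import checkpoint

def forward(self, x):
    # activations inside each block are recomputed during backward instead of being stored
    x = checkpoint(self.block1, x)
    x = checkpoint(self.block2, x)
    return x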
| https://stackoverflow.com/questions/72408888/ |
Why does huggingface tokenizer return only 1 `input_ids` instead of 3? | I'm trying to tokenize the squad dataset following the huggingface tutorial:
from datasets import load_dataset
from transformers import RobertaTokenizer
from transformers import logging
logging.set_verbosity_error()
dataset = load_dataset('squad')
checkpoint = 'roberta-base'
tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
def tokenize_function(example):
return tokenizer(example['question'], example['context'], [d['text'][0] for d in example['answers']], truncation=True)
tokenized_datasets = dataset['train'].map(tokenize_function, batched=True)
But when I print
tokenized_datasets
I get
Dataset({
features: ['id', 'title', 'context', 'question', 'answers', 'input_ids', 'attention_mask'],
num_rows: 87599
})
But shouldn't this return 3 input_ids, one for the question one for the context and one for the answer?
| Is that line of code:
tokenizer(example['question'], example['context'], [d['text'][0] for d in example['answers']], truncation=True)
shown in the course?
A Tokenizer accepts plenty of parameters with its __call__ method (documentation). Since you have only specified truncation by its name, the other parameter values are determined by their position. That means, you are executing:
tokenizer(text=example['question'], text_pair=example['context'], add_special_tokens=[d['text'][0] for d in example['answers']], truncation=True)
After you execute your code the sample with the id 5733be284776f41900661182 becomes:
{'id': '5733be284776f41900661182',
'title': 'University_of_Notre_Dame',
'context': 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.',
'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?',
'answers': {'text': ['Saint Bernadette Soubirous'], 'answer_start': [515]},
'input_ids': [0, 3972, 2661, 222, 5, 9880, 2708, 2346, 2082, 11, 504, 4432, 11, 226, 2126, 10067, 1470, 116, 2, 2, 37848, 37471, 28108, 6, 5, 334, 34, 10, 4019, 2048, 4, 497, 1517, 5, 4326, 6919, 18, 1637, 31346, 16, 10, 9030, 9577, 9, 5, 9880, 2708, 4, 29261, 11, 760, 9, 5, 4326, 6919, 8, 2114, 24, 6, 16, 10, 7621, 9577, 9, 4845, 19, 3701, 62, 33161, 19, 5, 7875, 22, 39043, 1459, 1614, 1464, 13292, 4977, 845, 4130, 7, 5, 4326, 6919, 16, 5, 26429, 2426, 9, 5, 25095, 6924, 4, 29261, 639, 5, 32394, 2426, 16, 5, 7461, 26187, 6, 10, 19035, 317, 9, 9621, 8, 12456, 4, 85, 16, 10, 24633, 9, 5, 11491, 26187, 23, 226, 2126, 10067, 6, 1470, 147, 5, 9880, 2708, 2851, 13735, 352, 1382, 7, 6130, 6552, 625, 3398, 208, 22895, 853, 1827, 11, 504, 4432, 4, 497, 5, 253, 9, 5, 1049, 1305, 36, 463, 11, 10, 2228, 516, 14, 15230, 149, 155, 19638, 8, 5, 2610, 25336, 238, 16, 10, 2007, 6, 2297, 7326, 9577, 9, 2708, 4, 2],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
The input_ids are the concatenation of text and text_pair:
tokenizer.decode([0, 3972, 2661, 222, 5, 9880, 2708, 2346, 2082, 11, 504, 4432, 11, 226, 2126, 10067, 1470, 116, 2, 2, 37848, 37471, 28108, 6, 5, 334, 34, 10, 4019, 2048, 4, 497, 1517, 5, 4326, 6919, 18, 1637, 31346, 16, 10, 9030, 9577, 9, 5, 9880, 2708, 4, 29261, 11, 760, 9, 5, 4326, 6919, 8, 2114, 24, 6, 16, 10, 7621, 9577, 9, 4845, 19, 3701, 62, 33161, 19, 5, 7875, 22, 39043, 1459, 1614, 1464, 13292, 4977, 845, 4130, 7, 5, 4326, 6919, 16, 5, 26429, 2426, 9, 5, 25095, 6924, 4, 29261, 639, 5, 32394, 2426, 16, 5, 7461, 26187, 6, 10, 19035, 317, 9, 9621, 8, 12456, 4, 85, 16, 10, 24633, 9, 5, 11491, 26187, 23, 226, 2126, 10067, 6, 1470, 147, 5, 9880, 2708, 2851, 13735, 352, 1382, 7, 6130, 6552, 625, 3398, 208, 22895, 853, 1827, 11, 504, 4432, 4, 497, 5, 253, 9, 5, 1049, 1305, 36, 463, 11, 10, 2228, 516, 14, 15230, 149, 155, 19638, 8, 5, 2610, 25336, 238, 16, 10, 2007, 6, 2297, 7326, 9577, 9, 2708, 4, 2])
Output:
<s>To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?</s></s>Architecturally, the school has a Catholic character. Atop the Main Building's gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.</s>
That is a common approach to handling extractive question-answering tasks. In this setup, the answers are not seen as input but are only needed as a target (i.e. for predicting the start and end positions).
Edit:
The OP specified the question in the comments and wants to know how the input_ids of the three text entities: question, context, and answer can be returned. All that needs to be changed is that the tokenize_function encodes the entities independently and returns a dict:
from datasets import load_dataset
from transformers import RobertaTokenizer
dataset = load_dataset('squad')
checkpoint = 'roberta-base'
tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
def tokenize_function(example):
question_o = tokenizer(example['question'], truncation=True)
context_o = tokenizer(example['context'], truncation=True)
answer_o = tokenizer([d['text'][0] for d in example['answers']], truncation=True)
return {"question_input_ids": question_o.input_ids, "question_attention_mask": question_o.attention_mask, "context_input_ids": context_o.input_ids, "context_attention_mask": context_o.attention_mask, "answer_input_ids": answer_o.input_ids, "answer_attention_mask": answer_o.attention_mask}
tokenized_datasets = dataset['train'].map(tokenize_function, batched=True)
| https://stackoverflow.com/questions/72411360/ |
How to move PyTorch model to GPU on Apple M1 chips? | On 18th May 2022, PyTorch announced support for GPU-accelerated PyTorch training on Mac.
I followed the following process to set up PyTorch on my Macbook Air M1 (using miniconda).
conda create -n torch-nightly python=3.8
$ conda activate torch-nightly
$ pip install --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu
I am trying to execute a script from Udacity's Deep Learning Course available here.
The script moves the models to GPU using the following code:
G.cuda()
D.cuda()
However, this will not work on M1 chips, since there is no CUDA.
If we want to move models to M1 GPU and our tensors to M1 GPU, and train entirely on M1 GPU, what should we be doing?
If Relevant: G and D are Discriminator and Generators for GAN's.
class Discriminator(nn.Module):
def __init__(self, conv_dim=32):
super(Discriminator, self).__init__()
self.conv_dim = conv_dim
# complete init function
self.cv1 = conv(in_channels=3, out_channels=conv_dim, kernel_size=4, stride=2, padding=1, batch_norm=False) # 32*32*3 -> 16*16*32
self.cv2 = conv(in_channels=conv_dim, out_channels=conv_dim*2, kernel_size=4, stride=2, padding=1, batch_norm=True) # 16*16*32 -> 8*8*64
self.cv3 = conv(in_channels=conv_dim*2, out_channels=conv_dim*4, kernel_size=4, stride=2, padding=1, batch_norm=True) # 8*8*64 -> 4*4*128
self.fc1 = nn.Linear(in_features = 4*4*conv_dim*4, out_features = 1, bias=True)
def forward(self, x):
# complete forward function
out = F.leaky_relu(self.cv1(x), 0.2)
out = F.leaky_relu(self.cv2(x), 0.2)
out = F.leaky_relu(self.cv3(x), 0.2)
out = out.view(-1, 4*4*conv_dim*4)
out = self.fc1(out)
return out
D = Discriminator(conv_dim)
class Generator(nn.Module):
def __init__(self, z_size, conv_dim=32):
super(Generator, self).__init__()
self.conv_dim = conv_dim
self.z_size = z_size
# complete init function
self.fc1 = nn.Linear(in_features = z_size, out_features = 4*4*conv_dim*4)
self.dc1 = deconv(in_channels = conv_dim*4, out_channels = conv_dim*2, kernel_size=4, stride=2, padding=1, batch_norm=True)
self.dc2 = deconv(in_channels = conv_dim*2, out_channels = conv_dim, kernel_size=4, stride=2, padding=1, batch_norm=True)
self.dc3 = deconv(in_channels = conv_dim, out_channels = 3, kernel_size=4, stride=2, padding=1, batch_norm=False)
def forward(self, x):
# complete forward function
x = self.fc1(x)
x = x.view(-1, conv_dim*4, 4, 4)
x = F.relu(self.dc1(x))
x = F.relu(self.dc2(x))
x = F.tanh(self.dc3(x))
return x
G = Generator(z_size=z_size, conv_dim=conv_dim)
| This is what I used:
if torch.backends.mps.is_available():
mps_device = torch.device("mps")
G.to(mps_device)
D.to(mps_device)
Similarly for all tensors that I want to move to M1 GPU, I used:
tensor_ = tensor_.to(mps_device)
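If you want the same script to also run on machines without MPS support, a guarded device selection (a sketch) works as well:
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
G.to(device)
D.to(device)
tensor_ = tensor_.to(device)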
Some operations are not yet implemented for MPS, and we might need to set a few environment variables to use a CPU fallback instead:
One error that I faced while executing the script was:
# NotImplementedError: The operator 'aten::_slow_conv2d_forward' is not current implemented for the MPS device. If you want this op to be added in priority during the prototype phase of this feature, please comment on https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be slower than running natively on MPS.
To solve it I set the environment variable PYTORCH_ENABLE_MPS_FALLBACK=1
conda env config vars set PYTORCH_ENABLE_MPS_FALLBACK=1
conda activate <test-env>
References:
https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/
https://pytorch.org/docs/master/notes/mps.html
https://sebastianraschka.com/blog/2022/pytorch-m1-gpu.html
https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#setting-environment-variables
| https://stackoverflow.com/questions/72416726/ |
PyTorch- call to backward() on two different functions in sequence gives unexpected output second time | I am trying to differentiate two functions z and y which are based on same input x. While doing so, I run y.backward() and z.backward(). As per my understanding, I have created two functions before doing any operation on the input so both y and z should be independent and give independent results of differentiation.
However, the output of the second call to backward() gives an incorrect result. It can be called as y.backward() then z.backward(). In this case, z.backward() gives 14 as output instead of 12. If I run z.backward() and then y.backward(), y.backward() gives 14 as output. Only the first output is correct in both cases. I cannot understand how it is giving 14 the second time.
import torch
x = torch.tensor(2.0, requires_grad=True)
y = 2 * x + 3
z = x**3 + 1
y.backward()
print('grad attribute of the tensor::',x.grad)
z.backward()
print('grad attribute of the tensor::',x.grad)
Output:
grad attribute of the tensor:: tensor(2.)
grad attribute of the tensor:: tensor(14.)
| backward() does not overwrite tensor grad attributes; it accumulates them. If you don't zero out the gradients in between backpropagations, the resulting gradients will be the sum of the gradients from each backpropagation. The reason that it's implemented like this is to better support recurrent neural networks.
The most conventional way to zero out the gradients is by calling torch.optim.Optimizer.zero_grad(); this will zero out the gradients of all of the parameters passed to the optimizer on construction. This works well when you're just using the gradients for an optimizer step. There's also torch.nn.Module.zero_grad(), which zeroes out the module's parameters (I believe it's recursive, so it should also zero out the sub-modules' parameters, and so on).
Also note that if you need to reuse intermediate results (gradients of non-leaf tensors), then you need to pass retain_graph=True when calling backward(); this is usually the case when working with recurrent neural networks. Otherwise, PyTorch will free them to conserve memory.
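Applied to the example above, clearing x.grad between the two calls gives the independent results (a sketch):
y.backward()
print(x.grad)     # tensor(2.)
x.grad = None     # or x.grad.zero_()
z.backward()
print(x.grad)     # tensor(12.)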
| https://stackoverflow.com/questions/72417872/ |
PyTorch's custom dataset class should inherit from torch.utils.data.Dataset or not - why both work? | I was studying the PyTorch's Dataset class. From what I knew beforehand, we need to inherit from torch.utils.data.Dataset everytime we create a CustomDataset class of our own; and further we need to override the __len__ and __getitem__ methods as per need.
But, I got to know that it isn't always necessary to inherit and we could go on to create our CustomDataset class with __len__ and __getitem__ methods without inheriting from torch.utils.data.Dataset and even then the behaviour of an instance of custom dataset remains pretty much same (I tested it myself).
That to say, len(cust_data) would return the length of the dataset we pass while creating our cust_data instance, and we could even index cust_data like cust_data[0] and it would return what's returned by __getitem__ method in our CustomDataset class.
My questions are -
What is the need for inheriting when we are just as fine without it, and if we are not, what functionality do we miss out on if we do not inherit? When is inheriting recommended and when is it not? (The official docs recommend always inheriting.)
When not inheriting, how did the instance know it needs to call the __getitem__ method when it is indexed?
Any answers appreciated.
|
Take a look at the source code for torch.utils.data.Dataset - it is an abstract class, which guarantees that every class inheriting it must implement __getitem__. In other words, you don't "need" to inherit Dataset: as long as __getitem__ is properly implemented your dataset class will work fine. The reason why doing so has become common practice is that it indicates to a third party (e.g. some other code that uses your dataset class, or someone else reading your code) that the class in question has __getitem__ implemented. It provides a common interface for PyTorch datasets.
Executing someClass[i] will automatically call someClass.__getitem__ with parameter i (and will throw an error if __getitem__ is not implemented). This is a Python built-in feature and has nothing to do with whichever base class you are inheriting. You can Google "dunder methods" to learn more about these special behaviors.
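As a small illustration of point 2 (a sketch), a class that never inherits from torch.utils.data.Dataset still supports len() and indexing as long as the dunder methods are defined:
class PlainDataset:
    def __init__(self, data):
        self.data = data
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        return self.data[idx]

ds = PlainDataset([10, 20, 30])
print(len(ds), ds[1])  # 3 20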
| https://stackoverflow.com/questions/72417883/ |
How to run a GNN example with Pytorch, on a CPU without CUDA? | I am trying to code a GNN example problem as shown in the given link: https://towardsdatascience.com/hands-on-graph-neural-networks-with-pytorch-pytorch-geometric-359487e221a8
I am using a Macbook Pro 2016 edition, without an Nvidia graphics card!
The example code uses the CUDA toolkit. Can I somehow modify the code and run it on my current laptop? I have made the dataset sufficiently small, so that it does not require heavy computation and can run on my PC!
The part of the code which is giving an error is as follows!
def train():
model.train()
loss_all = 0
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
output = model(data)
label = data.y.to(device)
loss = crit(output, label)
loss.backward()
loss_all += data.num_graphs * loss.item()
optimizer.step()
return loss_all / len(train_dataset)
device = torch.device('cuda')
model = Net().to(device) # Net = A class inherited from torch.nn.Module
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
crit = torch.nn.BCELoss()
train_loader = DataLoader(train_dataset, batch_size=batch_size)
for epoch in range(num_epochs):
train()
The error is as follows
AssertionError: Torch not compiled with CUDA enabled
| You are using:
device = torch.device('cuda')
If you would like to use the CPU, please change it to:
device = torch.device('cpu')
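If you want the same script to work on machines with and without a GPU, a common pattern (not part of the original snippet) is to pick the device at runtime:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)   # Net is the model class from the question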
| https://stackoverflow.com/questions/72418399/ |
Fusing the features of two classes and feeding them with the fused features | In the main class, there are two classes: the first class is called first, and the second class is called after it. I want to use a module that receives the features from these two classes and does some calculations, and finally each of the mentioned classes needs to receive the outcome of the module back.
The idea that comes to my mind is to pass the feature of the first class into the second class and then apply the module inside the second class, but the problem is that in this scenario it is not possible to pass the outcome of the module back into the first class.
For example for these two classes and the module class:
class first(nn.Module):
def __init__(self, in_planes=128, out_planes=64, kernel_size=3, stride=1, padding=0):
super(first, self).__init__()
self.conv_s = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, groups=in_planes)
self.bn_s = nn.BatchNorm2d(out_planes)
self.relu_s = nn.ReLU()
def forward(self, x):
x = self.conv_s(x)
y1 = self.bn_s(x)
x = self.relu_s(x)
return x
class second(nn.Module):
def __init__(self, in_planes=128, out_planes=64, kernel_size=3, stride=1, padding=0):
super(second, self).__init__()
self.conv_s = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, groups=in_planes)
self.bn_s = nn.BatchNorm2d(out_planes)
self.relu_s = nn.ReLU()
def forward(self, x):
x = self.conv_s(x)
y2 = self.bn_s(x)
x = self.relu_s(x)
return x
The Module class:
class module(nn.Module):
def __init__(self):
super(module, self).__init__()
self.conv1h = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn1h = nn.BatchNorm2d(64)
self.conv2h = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn2h = nn.BatchNorm2d(64)
self.conv3h = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn3h = nn.BatchNorm2d(64)
self.conv4h = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn4h = nn.BatchNorm2d(64)
self.conv1v = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn1v = nn.BatchNorm2d(64)
self.conv2v = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn2v = nn.BatchNorm2d(64)
self.conv3v = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn3v = nn.BatchNorm2d(64)
self.conv4v = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn4v = nn.BatchNorm2d(64)
def forward(self, left, down):
if down.size()[2:] != left.size()[2:]:
down = F.interpolate(down, size=left.size()[2:], mode='bilinear')
out1h = F.relu(self.bn1h(self.conv1h(left )), inplace=True)
out2h = F.relu(self.bn2h(self.conv2h(out1h)), inplace=True)
out1v = F.relu(self.bn1v(self.conv1v(down )), inplace=True)
out2v = F.relu(self.bn2v(self.conv2v(out1v)), inplace=True)
fuse = out2h*out2v
out3h = F.relu(self.bn3h(self.conv3h(fuse )), inplace=True)+out1h
out4h = F.relu(self.bn4h(self.conv4h(out3h)), inplace=True)
out3v = F.relu(self.bn3v(self.conv3v(fuse )), inplace=True)+out1v
out4v = F.relu(self.bn4v(self.conv4v(out3v)), inplace=True)
return out4h, out4v
The order of the classes in the main class is as follows:
class Main(nn.Module):
def __init__(self):
super(Main, self).__init__()
self.first=first(the required arguments)
self.second=second(the required arguments)
self.features = feature_extractor()
def forward(self, x):
x1, x2 = self.features(x) # as self.features, you can produce 128 convolutional channels
x1 = self.first(x1)
x2 = self.first(x2)
return x1, x2
My question is how it is possible to pass the outcome of the module back into the first class. To be clearer: after passing the y1 and y2 variables of the first and second classes into the module class, how can I multiply one of the outcomes of the module class with y1 in the first class and the other outcome of the module class with the y2 variable in the second class? This matters because if I integrate the module in the second class, although I can pass y1 into the second class and the module, I cannot pass the outcome of the module back to the first class and multiply it with y1.
Update:
I want the module class to receive y1 and y2 from the first class and second class respectively, but I have no idea how I should integrate the module class inside the code. The image below shows the idea more clearly:
| Since the program you've added is pretty convoluted, I've created similar classes to demonstrate the principle that will solve the problem.
class first:
# this class gives y1 in your example
def __init__(self, x = 0):
self.x = x
def get_y1(self):
# do any computations if needed
print(f"x (y1) has current value {self.x}")
return self.x
def calc(self, y):
self.x += y
print(f"x (y1) updated to {self.x}")
class second:
# this class gives y2 in your example
def __init__(self, x = 0):
self.x = x
def get_y2(self):
# do any computations if needed
print(f"x (y2) has current value {self.x}")
return self.x
def calc(self, y):
self.x *= y
print(f"x (y2) updated to {self.x}")
class module:
# this class takes y1 and y2 for computation and returns results
def __init__(self):
pass
def calc(self, x, y):
return x+1,y+1
class main:
def __init__(self):
self.first = first(x = 5)
self.second = second(x = 3)
self.module = module()
def calc(self):
y1 = self.first.get_y1()
y2 = self.second.get_y2()
result1, result2 = self.module.calc(y1, y2)
self.first.calc(result1)
self.second.calc(result2)
obj = main()
obj.calc()
'''
Output for this:
x (y1) has current value 5
x (y2) has current value 3
x (y1) updated to 11
x (y2) updated to 12
'''
This basically does the same thing as you're requesting: main class holds objects for first, second and module; a computation in module uses y1 and y2 from first and second. The returned values are then used to update y1 and y2 themselves.
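If you want to keep everything as nn.Module classes, the same pattern could look roughly like this. This is only a sketch: the simplified convolutions, the fact that the multiplication happens in Main, and the Second/feature_extractor placeholders are all assumptions, not code from the original post.

import torch.nn as nn

class First(nn.Module):
    def __init__(self, in_planes=128, out_planes=64):
        super().__init__()
        self.conv_s = nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1, bias=False)
        self.bn_s = nn.BatchNorm2d(out_planes)
        self.relu_s = nn.ReLU()

    def forward(self, x):
        x = self.conv_s(x)
        y1 = self.bn_s(x)          # the intermediate feature to fuse later
        x = self.relu_s(x)
        return x, y1               # expose y1 to the caller

# Second is defined analogously and returns (x, y2)

class Main(nn.Module):
    def __init__(self):
        super().__init__()
        self.first = First()
        self.second = Second()              # placeholder, analogous to First
        self.module = module()              # the fusion module from the question
        self.features = feature_extractor()

    def forward(self, x):
        x1, x2 = self.features(x)
        x1, y1 = self.first(x1)
        x2, y2 = self.second(x2)
        out_h, out_v = self.module(y1, y2)  # fuse the two intermediate features
        fused1 = y1 * out_h                 # multiply one module output with y1
        fused2 = y2 * out_v                 # multiply the other output with y2
        return fused1, fused2

Because y1 and y2 are returned from forward rather than recomputed, autograd still tracks the whole path, so gradients flow back through both branches.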
| https://stackoverflow.com/questions/72418860/ |
PyTorch CUDA version is always 10.2 | I've installed a handful of PyTorch versions (CUDA 11.7 nightly, CUDA 11.6 nightly, 11.3), but every time, torch.version.cuda returns 10.2.
I'd like to run PyTorch on CUDA 11.7. My graphics card has CUDA capability sm_86.
[me@legion imagen-test]$ sudo pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
...
[me@legion imagen-test]$ python
>>> import torch
>>> print(torch.version.cuda)
10.2
When I actually try to use PyTorch, I get an error saying the PyTorch version I have installed doesn't support the newer version of CUDA my graphics card requires.
>>> torch.Tensor([1,2,3]).cuda()
...
NVIDIA GeForce RTX 3060 Laptop GPU with CUDA capability sm_86 is not compatible with the current PyTorch installation.
The current PyTorch install supports CUDA capabilities sm_37 sm_50 sm_60 sm_70.
...
RuntimeError: CUDA error: no kernel image is available for execution on the device
CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
I'm completely stumped, and unsure where to go from here. I'd appreciate any help.
| You've probably still got a PyTorch build compiled for CUDA 10.2 among your different installed versions, and it may be taking priority over the newer builds. To fix this, simply uninstall all versions of PyTorch with pip uninstall torch -y and reinstall PyTorch with CUDA 11.7.
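After reinstalling, a quick sanity check like the following should confirm the right build is picked up (the expected values are assumptions based on the cu117 build and an RTX 3060):

import torch

print(torch.__version__)                     # should now show a +cu117 build, not +cu102
print(torch.version.cuda)                    # e.g. '11.7'
print(torch.cuda.is_available())             # True
print(torch.cuda.get_device_capability(0))   # (8, 6) for an RTX 3060
torch.tensor([1, 2, 3]).cuda()               # should no longer raise the sm_86 error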
Source: https://discuss.pytorch.org/t/cuda-version-is-always-10-2/152876
| https://stackoverflow.com/questions/72427869/ |
Copy tensor elements of certain indices in PyTorch | The desired operation is similar in spirit to torch.Tensor.index_copy, but a little different.
It's best explained with an example.
Tensor A has original values that we will copy:
[10, 20, 30]
Tensor B has indices of A:
[0, 1, 0, 1, 2, 1]
Tensor C has same length as B, containing the indexed values of A:
[10, 20, 10, 20, 30, 20]
What's a good way to make C from A and B in PyTorch, without using loops?
| Have you tried just indexing by A?
In [1]: import torch
In [2]: a = torch.tensor([20,30,40])
In [3]: b = torch.tensor([0,1,2,1,1,2,0,0,1,2])
In [4]: a[b]
Out[4]: tensor([20, 30, 40, 30, 30, 40, 20, 20, 30, 40])
| https://stackoverflow.com/questions/72427902/ |
TypeError: 'numpy.float32' object is not iterable when logging in mlflow | I am trying a machine learning model and logging metrics using mlflow. But I am getting TypeError: 'numpy.float32' object is not iterable. I have tried using .tolist() and dict() but nothing seems to work.
def train(max_epochs, model, optimizer, scheduler, train_loader, valid_loader, project_name):
best_val_loss = 100
for epoch in range(max_epochs):
model.train()
running_loss = []
tq_loader = tqdm(train_loader)
o = {}
for samples in tq_loader:
optimizer.zero_grad()
outputs, interaction_map = model(
[samples[0].to(device), samples[1].to(device), torch.tensor(samples[2]).to(device),
torch.tensor(samples[3]).to(device)])
l1_norm = torch.norm(interaction_map, p=2) * 1e-4
loss = loss_fn(outputs, torch.tensor(samples[4]).to(device).float()) + l1_norm
loss.backward()
optimizer.step()
loss = loss - l1_norm
running_loss.append(loss.cpu().detach())
tq_loader.set_description(
"Epoch: " + str(epoch + 1) + " Training loss: " + str(np.mean(np.array(running_loss))))
model.eval()
val_loss, mae_loss = get_metrics(model, valid_loader)
scheduler.step(val_loss)
#metrics mlflow
mlflow.log_metrics('train_loss',(np.mean(np.array(running_loss))).tolist())
mlflow.log_metrics('validation_loss',(val_loss).tolist())
mlflow.log_metrics('MAE Val_loss', (mae_loss).tolist())
print("Epoch: " + str(epoch + 1) + " train_loss " + str(np.mean(np.array(running_loss))) + " Val_loss " + str(
val_loss) + " MAE Val_loss " + str(mae_loss))
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save(model.state_dict(), "./runs/run-" + str(project_name) + "/models/best_model.tar")
mlflow.set_experiment('CIGIN_V2')
mlflow.start_run(nested=True)
train(max_epochs, model, optimizer, scheduler, train_loader, valid_loader, project_name)
mlflow.end_run()
Error
Epoch: 1 Training loss: 6770.575: 100%|██████████| 1/1 [00:04<00:00, 4.35s/it]
100%|██████████| 1/1 [00:03<00:00, 3.86s/it]
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-96-8c3a6eb822c3> in <module>()
1 mlflow.set_experiment('CIGIN_V2')
2 mlflow.start_run(nested=True)
----> 3 train(max_epochs, model, optimizer, scheduler, train_loader, valid_loader, project_name)
4 mlflow.end_run()
<ipython-input-95-ab0a6c80b65b> in train(max_epochs, model, optimizer, scheduler, train_loader, valid_loader, project_name)
55
56 #metrics mlflow
---> 57 mlflow.log_metrics('train_loss',dict(np.mean(np.array(running_loss))).tolist())
58 mlflow.log_metrics('validation_loss',dict(val_loss).tolist())
59 mlflow.log_metrics('MAE Val_loss', dict(mae_loss).tolist())
TypeError: 'numpy.float32' object is not iterable
| You're logging a single value into log_metrics, and I don't think that's correct based on the implementation of log_metric and log_metrics in the documentation:
https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.log_metric and
https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.log_metrics
So I would suggest changing log_metrics to log_metric and leaving out the tolist().
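For instance, the three calls in the training loop could look roughly like this (the metric names and the step argument are just one way to do it):

mlflow.log_metric('train_loss', float(np.mean(np.array(running_loss))), step=epoch)
mlflow.log_metric('validation_loss', float(val_loss), step=epoch)
mlflow.log_metric('MAE_val_loss', float(mae_loss), step=epoch)

# or, if you prefer a single call, log_metrics expects a dict of name -> value
mlflow.log_metrics({'train_loss': float(np.mean(np.array(running_loss))),
                    'validation_loss': float(val_loss),
                    'MAE_val_loss': float(mae_loss)}, step=epoch)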
| https://stackoverflow.com/questions/72431938/ |
RMSProp in TF VS Pytorch | In Pytorch's RMSProp implementation we are given the parameter alpha which according to the documentation:
alpha (float, optional) β smoothing constant (default: 0.99)
On the other hand, TF's implementation has the parameter rho (Formally named decay):
rho Discounting factor for the history/coming gradient. Defaults to 0.9.
Are those parameters the same with different names or are they different?
I couldn't find any information regarding the differences.
| If you compare the source code of PyTorch (here) and that of Tensorflow (on a forked build), you will see that alpha and rho are indeed the same.
Also, as opposed to Tensorflow, PyTorch spells out the underlying update rule for its RMSprop module in its documentation, so you can verify that alpha plays exactly the role that rho plays in Keras.
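As a quick side-by-side sketch (model here is a stand-in for any nn.Module; the 0.9 value is just the TF default carried over to both):

import torch
import tensorflow as tf

opt_torch = torch.optim.RMSprop(model.parameters(), lr=0.001, alpha=0.9)  # alpha = smoothing constant
opt_tf = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)        # rho = same discounting factor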
| https://stackoverflow.com/questions/72434215/ |
GPU available in Tensorflow but not in Torch | I'm currently working on a server and I would like to be able to use the GPUs for PyTorch network training. I am not able to detect the GPUs using torch, but if I use TensorFlow I can detect both of the GPUs I am supposed to have. I suppose it's a mismatch between the PyTorch/TensorFlow builds and the CUDA version on the machine.
However, after trying different versions of PyTorch, I am still not able to use them...
I am attaching the specifications of the GPUs and the current versions of TensorFlow and PyTorch I am using. Does anyone have any hint on it? It would be very helpful.
| NVIDIA-SMI 4--.--.-- Driver Version: 465.19.01 CUDA Version: 11.3 |
|-------------------------------+----------------------+----------------------|
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 NVIDIA GeForce ... On | 00000000:02:00.0 Off | N/A |
| 27% 39C P8 17W / 250W | 1MiB / 11176MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 1 NVIDIA GeForce ... On | 00000000:81:00.0 Off | N/A |
| 28% 45C P8 11W / 250W | 1MiB / 11178MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
$ nvcc --version
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2020 NVIDIA Corporation
Built on Wed_Jul_22_19:09:09_PDT_2020
Cuda compilation tools, release 11.0, V11.0.221
Build cuda_11.0_bu.TC445_37.28845127_0
Torch version: 1.10.2
Tensorflow Version: 2.6.2
Cuda toolkit: 11.3.1
>>> print('Number of GPUs: %d' % len(tf.config.list_physical_devices('GPU')))
Number of GPUs: 2
>>> torch.cuda.is_available()
False
I am so lost... Thank you in advance!
| I was finally able to resolve this problem by explicitly specifying the CUDA version of PyTorch... the combination of those specific versions was installing the CPU-only build.
After installing the correct one, I have been able to use the GPU server without any problem.
| https://stackoverflow.com/questions/72445866/ |
LBFGS Giving Tensor Object not Callable Error when using Optimizer.step | I am trying to use sgd, adam, and LBFGS optimizer.
The part of the code is:
for batch_idx, (inputs, targets) in enumerate(trainloader):
batch_size = inputs.size(0)
total += batch_size
one_hot_targets = torch.FloatTensor(batch_size, 10).zero_()
one_hot_targets = one_hot_targets.scatter_(1, targets.view(batch_size, 1), 1.0)
one_hot_targets = one_hot_targets.float()
if use_cuda:
inputs, one_hot_targets = inputs.cuda(), one_hot_targets.cuda()
inputs, one_hot_targets = Variable(inputs), Variable(one_hot_targets)
if optimizer_val=='sgd' or optimizer_val=='adam':
outputs = F.softmax(net(inputs))
loss = criterion(outputs, one_hot_targets)
loss.backward()
optimizer.step()
else:
def closure():
optimizer.zero_grad()
outputs = F.softmax(net(inputs))
loss = criterion(outputs, one_hot_targets)
loss.backward()
return loss
optimizer.step(closure())
In the optimizer.step(closure()) part in LBFGS (running in else) I am getting this error:
TypeError: 'Tensor' object is not callable
I checked, the loss is tensor type.
How to make it work?
| You need to pass the closure function itself as a callback to optimizer.step; don't call it:
optimizer.step(closure)
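Applied to the question's else branch, the pattern would look roughly like this (the dim=1 in softmax is an addition on my part):

def closure():
    optimizer.zero_grad()
    outputs = F.softmax(net(inputs), dim=1)
    loss = criterion(outputs, one_hot_targets)
    loss.backward()
    return loss

loss = optimizer.step(closure)   # pass the function itself; LBFGS may call it several times per step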
| https://stackoverflow.com/questions/72447212/ |
Reshaping or "concatenating" a tensor along an axis | I have a tensor, t, of the following shape: torch.Size([280, 4, 768]).
What I want is to achieve, effectively, concatenation along the second axis, resulting in torch.Size([280, 3072]).
I know that I can, for instance, do:
torch.cat((x[:, -4, :], x[:, -3, :], x[:, -2, :], x[:, -1, :]), dim=1)
but is there a nicer way of writing this?
How do I achieve reshaping along the second axis without messing up my values?
| Yes you can apply a straight forward reshape:
>>> x.reshape(len(x), -1)
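For instance (t is just a stand-in for your tensor), and torch.flatten gives the same result:

>>> t = torch.rand(280, 4, 768)
>>> t.reshape(len(t), -1).shape
torch.Size([280, 3072])
>>> torch.equal(t.reshape(len(t), -1), t.flatten(start_dim=1))
True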
| https://stackoverflow.com/questions/72447983/ |
Using the embedding layer as the input for an encoder | I want to use the embedding layer as the input for an encoder; however, I get the error below. My input y is time-series data with shape 1×84. Could you please help me with that?
import numpy as np
import torch
import torch.nn as nn
r_input = torch.nn.Embedding(84, 10)
activation = nn.functional.relu
mu_r = nn.Linear(10, 6)
log_var_r = nn.Linear(10, 6)
y = np.random.rand(1,84)
def encode_r(y):
y = torch.reshape(y, (-1, 1, 84)) # torch.Size([batch_size, 1, 84])
hidden = torch.flatten(activation(r_input(y)), start_dim = 1)
z_mu = mu_r(hidden)
z_log_var = log_var_r(hidden)
return z_mu, z_log_var
Error: RuntimeError: Expected tensor for argument #1 'indices' to have one of the following scalar types: Long, Int; but got torch.cuda.FloatTensor instead (while checking arguments for embedding)
| According to this thread: https://discuss.pytorch.org/t/expected-tensor-for-argument-1-indices-to-have-scalar-type-long-but-got-cpufloattensor-instead-while-checking-arguments-for-embedding/32441/4, the fix is to make sure the values you feed to the nn.Embedding layer are integer (Long/Int) indices rather than floats: nn.Embedding is a lookup table indexed by token IDs, so it cannot consume continuous float inputs directly.
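A minimal sketch of inputs that nn.Embedding will accept (the randint call is just to produce valid indices; how you map your 1×84 series to vocabulary indices is up to you):

r_input = torch.nn.Embedding(84, 10)

y = torch.randint(0, 84, (1, 84))           # integer indices in [0, 84)
emb = r_input(y)                            # shape (1, 84, 10)

# if y starts as a NumPy array of indices, cast it to long first
y_np = np.random.randint(0, 84, size=(1, 84))
emb = r_input(torch.from_numpy(y_np).long())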
| https://stackoverflow.com/questions/72451813/ |
How to input embeddings directly to a huggingface model instead of tokens? | I'm going over the huggingface tutorial where they showed how tokens can be fed into a model to generate hidden representations:
import torch
from transformers import RobertaTokenizer
from transformers import RobertaModel
checkpoint = 'roberta-base'
tokenizer = RobertaTokenizer.from_pretrained(checkpoint)
model = RobertaModel.from_pretrained(checkpoint)
sequences = ["I've been waiting for a HuggingFace course my whole life."]
tokens = tokenizer(sequences, padding=True)
out = model(torch.tensor(tokens['input_ids']))
out.last_hidden_state
But how can I input word embeddings directly instead of tokens? That is, I have another model that generates word embeddings, and I need to feed those into the model.
| Most (every?) huggingface encoder model supports that with the parameter inputs_embeds:
import torch
from transformers import RobertaModel
m = RobertaModel.from_pretrained("roberta-base")
my_input = torch.rand(2,5,768)
outputs = m(inputs_embeds=my_input)
P.S.: Don't forget the attention mask in case this is required.
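For example, a mask of all ones (shape matching the first two dims of inputs_embeds) marks every position as a real token:

attention_mask = torch.ones(2, 5, dtype=torch.long)   # 1 = attend, 0 = padding
outputs = m(inputs_embeds=my_input, attention_mask=attention_mask)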
| https://stackoverflow.com/questions/72454697/ |
pytorch optimizer TypeError 'collections.OrderedDict' object is not callable | I am using Python 3.8. PyTorch suddenly reports a TypeError when constructing the optimizer, although the program was still running fine two weeks ago.
net = Net(num_classes=7)
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)
It seems that net.parameters() cannot return the correct result?
Traceback (most recent call last):
File "C:/Users/usr/Desktop/jaffeAttention/jaffe.py", line 190, in <module>
main()
File "C:/Users/usr/Desktop/jaffeAttention/jaffe.py", line 87, in main
optimizer = optim.Adam(net.parameters(), lr=1e-4)
TypeError: 'collections.OrderedDict' object is not callable
| You get the error because net.parameters is an OrderedDict in your case, and you then try to call it with net.parameters(), which is not possible.
To check this yourself, you could try the following commands:
net = Net(num_classes=7)
net.to(device)
criterion = nn.CrossEntropyLoss()
params= net.parameters
print(type(params)) # verify this is indeed an OrderedDict
optimizer = optim.Adam(params, lr=1e-4) # notice there is no () when passing the params to Adam
You can read about OrderedDict in the python documentation. https://docs.python.org/3/library/collections.html#collections.OrderedDict
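For completeness, here is a hypothetical way the attribute can end up shadowed, and the usual fix (the checkpoint file name is made up and this is not taken from the question's code):

net = Net(num_classes=7)
net.parameters = torch.load('checkpoint.pth')    # assigns a state_dict (an OrderedDict) over the method
net.parameters()                                  # TypeError: 'collections.OrderedDict' object is not callable

# loading weights via load_state_dict keeps the parameters() method intact
net = Net(num_classes=7)
net.load_state_dict(torch.load('checkpoint.pth'))
optimizer = optim.Adam(net.parameters(), lr=1e-4)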
| https://stackoverflow.com/questions/72457671/ |
Select items from a matrix or tensor using 1-D array of indexes to get 1D or 2D tensors in torch | I have a tensor of n sampled predictions. In this example I sample a (10,2) result 4 times, where this result represents a batch of graphs with each graph having 3+4+3=10 nodes with 2 coordinates. I have a way to select the best sample for each graph, and I have an index telling me this (idx_test). I want to select the elements of the sampled tensor in an efficient way. The best way I have found is like this (toy code example):
rand_test = torch.randint(3, 10, (4, 10, 2))
idx_test = torch.LongTensor([0, 3, 1])
n_nodes = [3, 4, 3]
final_tensor = torch.zeros((10, 2))
accum_nodes = torch.cumsum(torch.LongTensor(n_nodes), 0)
# First iteration is done outside the for loop
final_tensor[:accum_nodes[0]] = rand_test[idx_test[0],:accum_nodes[0],:]
for i, idx in enumerate(idx_test):
final_tensor[accum_nodes[i-1]:accum_nodes[i]] = rand_test[idx,accum_nodes[i-1] :accum_nodes[i], :]
print(final_tensor)
The objective is to obtain the same final tensor without using for loops since in my model this array is large
Thanks!
| You could create a helper tensor containing, for every node, the sample index you want to index rand_test with. In your example: for i=0 there are 3 nodes taking index value 0, for i=1 there are 4 nodes taking value 3, and for i=2 there are 3 nodes taking value 1.
You can do so using repeat_interleave:
>>> idx = idx_test.repeat_interleave(torch.tensor(n_nodes))
>>> idx
tensor([0, 0, 0, 3, 3, 3, 3, 1, 1, 1])
Then index rand_test using idx and a range:
>>> final_tensor = rand_test[idx, range(len(idx))]
tensor([[5, 5],
[7, 3],
[8, 4],
[7, 5],
[7, 6],
[7, 8],
[9, 9],
[7, 7],
[8, 7],
[3, 7]])
| https://stackoverflow.com/questions/72459047/ |
way for multiplication of these tensors with gradients | I have a function with two inputs: heat maps and feature maps.
The heatmaps have a shape of (20, 14, 64, 64) and the feature maps have a shape of (20, 64, 64, 64). Where 20 is the batch size and 14 is the number of key points. Both heatmaps and feature maps have spatial dimensions of 64x64 and the featuremaps have 64 channels (on the second dimension).
Now I need to multiply each heatmap by each channel of the feature maps. So the first heatmap has to be multiplied by all 64 channels of the feature maps. The second with all channels, and so on.
After that, I should have a tensor of shape (20, 14, 64, 64, 64) on which I need to apply global max-pooling.
The problem is now that I can't create a new tensor to do that, because the gradients of the heatmaps and feature maps must be preserved.
My actual (slow and not-gradient-keeping) code is:
def get_keypoint_representation(self, heatmaps, features):
heatmaps = heatmaps[0]
pool = torch.nn.MaxPool2d(features.shape[2])
features = features[:, None, :, :, :]
features = features.expand(-1, 14, -1, -1, -1).clone()
for i in range(self.cfg.SINGLE_GPU_BATCH_SIZE):
for j in range(self.cfg.NUM_JOINTS):
for k in range(features.shape[2]):
features[i][j][k] = torch.matmul(heatmaps[i][j], features[i][j][k])
gmp = features.amax(dim=(-1, -2))
return gmp
Overview of the task:
| Given a tensor of heatmaps hm shaped (b, k, h, w) and a feature tensor fm shaped (b, c, h, w).
You can perform such an operation with a single einsum operator
>>> z = torch.einsum('bkhw,bchw->bkchw', hm, fm)
>>> z.shape
torch.Size([20, 14, 64, 64, 64])
Then follow with a max-pooling operation over the spatial dimensions using amax:
>>> gmp = z.amax(dim=(-1,-2))
>>> gmp.shape
torch.Size([20, 14, 64])
| https://stackoverflow.com/questions/72460851/ |
Reading files with .h5 format and using it in dataset | I have two folders (one for train and one for test), and each one has around 10 files in H5 format. I want to read them and use them in a dataset. I have a function to read them, but I don't know how to use it to read the files in my class.
def read_h5(path):
data = h5py.File(path, 'r')
image = data['image'][:]
label = data['label'][:]
return image, label
class Myclass(Dataset):
def __init__(self, split='train', transform=None):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __getitem__(self, index):
raise NotImplementedError
Do you have a suggestion?
Thank you in advance
| This might be a start for what you want to do. I implemented __init__(), but not __len__() or __getitem__(). The user provides the path, and the init function calls the class method read_h5() to get the arrays of image and label data. There is a short main to create class objects from 2 different H5 files. Modify the paths list with folder and filenames for all of your training and testing data.
class H5_data():
def __init__(self, path): #split='train', transform=None):
self.path = path
self.image, self.label = H5_data.read_h5(path)
@classmethod
def read_h5(cls,path):
with h5py.File(path, 'r') as data:
image = data['image'][()]
label = data['label'][()]
return image, label
paths = ['train_0.h5', 'test_0.h5']
for path in paths:
h5_test = H5_data(path)
print(f'For HDF5 file: {path}')
print(f'image data, shape: {h5_test.image.shape}; dtype: {h5_test.image.dtype}')
print(f'label data, shape: {h5_test.label.shape}; dtype: {h5_test.label.dtype}')
IMHO, creating a class with the array data is overkill (and could lead to memory problems if you have really large datasets). It is more memory efficient to create h5py dataset objects, and access the data when you need it. Example below does the same as code above, without creating a class object with numpy arrays.
paths = ['train_0.h5', 'test_0.h5']
for path in paths:
with h5py.File(path, 'r') as data:
image = data['image']
label = data['label']
print(f'For HDF5 file: {path}')
print(f'image data, shape: {image.shape}; dtype: {image.dtype}')
print(f'label data, shape: {label.shape}; dtype: {label.dtype}')
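If you do want to wrap this in a torch Dataset as in your class skeleton, a minimal sketch could look like the following. It assumes every file stores 'image' and 'label' arrays of matching length and that all files in a folder belong to one split; the glob pattern, placeholder folder names, and transform handling are assumptions on my part.

import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset

class H5Dataset(Dataset):
    def __init__(self, folder, transform=None):
        self.transform = transform
        images, labels = [], []
        for path in sorted(glob.glob(f'{folder}/*.h5')):
            with h5py.File(path, 'r') as f:
                images.append(f['image'][()])
                labels.append(f['label'][()])
        self.images = np.concatenate(images)   # stack samples from all files
        self.labels = np.concatenate(labels)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image = torch.as_tensor(self.images[index]).float()
        label = torch.as_tensor(self.labels[index])
        if self.transform is not None:
            image = self.transform(image)
        return image, label

train_set = H5Dataset('train_folder')   # folder names are placeholders
test_set = H5Dataset('test_folder')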
| https://stackoverflow.com/questions/72461845/ |