Reputation: 37
I am training a model in PyTorch to classify 3 classes (0, 1, 2), using 2-fold cross validation. I would like to plot the accuracy and the loss for the training and test datasets over the number of epochs, on the same plot. I do not know how to do that, because currently I only evaluate on the test set once training has finished. Is there a way to get that plot for both the training data and the test data?
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import ConcatDataset
from sklearn.model_selection import KFold

# Configuration options
k_folds = 2
loss_function = nn.CrossEntropyLoss()

# For fold results
results = {}

# Set fixed random number seed
torch.manual_seed(42)

# Prepare dataset by concatenating train/test parts; we split later.
training_set = CustomDataset('one_hot_train_data.txt', 'train_3states_target.txt')
training_generator = torch.utils.data.DataLoader(training_set, **params)
val_set = CustomDataset('one_hot_val_data.txt', 'val_3states_target.txt')
test_set = CustomDataset('one_hot_test_data.txt', 'test_3states_target.txt')
dataset = ConcatDataset([training_set, test_set])
kfold = KFold(n_splits=k_folds, shuffle=True)

print('--------------------------------')

# K-fold cross validation model evaluation
for fold, (train_ids, test_ids) in enumerate(kfold.split(dataset)):
    print(f'FOLD {fold}')
    print('--------------------------------')

    # Sample elements randomly from the given lists of ids, without replacement.
    train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
    test_subsampler = torch.utils.data.SubsetRandomSampler(test_ids)

    # Define data loaders for training and testing data in this fold
    trainloader = torch.utils.data.DataLoader(dataset, **params, sampler=train_subsampler)
    testloader = torch.utils.data.DataLoader(dataset, **params, sampler=test_subsampler)

    # Init the neural network
    model = PPS()
    model.to(device)

    # Initialize optimizer
    optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)

    # Run the training loop for the defined number of epochs
    train_acc = []
    for epoch in range(N_EPOCHES):
        print(f'Starting epoch {epoch + 1}')

        running_loss = 0.0
        epoch_loss = 0.0
        a = []

        # Iterate over the DataLoader for training data
        for i, data in enumerate(trainloader, 0):
            inputs, targets = data
            inputs = inputs.unsqueeze(-1)
            inputs = inputs.to(device)
            targets = targets.to(device)

            # Zero the gradients
            optimizer.zero_grad()

            # Perform forward pass (the model returns the loss and the predictions)
            loss, outputs = model(inputs, targets)
            outputs = outputs.to(device)

            # Perform backward pass and optimization
            loss.backward()
            optimizer.step()

            # Accumulate statistics; use .item() so the graph is not kept alive
            running_loss += loss.item()
            epoch_loss += loss.item()
            a.append(torch.sum(outputs == targets).item())

            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000),
                      "acc", torch.sum(outputs == targets) / float(outputs.shape[0]))
                running_loss = 0.0

        print("epoch", epoch + 1, "acc", sum(a) / len(train_subsampler),
              "loss", epoch_loss / len(trainloader))
        train_acc.append(sum(a) / len(train_subsampler))

        # Save a checkpoint for this epoch and fold
        state = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
                 'optimizer': optimizer.state_dict()}
        torch.save(state, path + name_file + "model_epoch_i_" + str(epoch) + str(fold) + ".cnn")

    # Evaluation for this fold
    print('Starting testing')
    correct, total = 0, 0
    with torch.no_grad():
        # Iterate over the test data and generate predictions
        for i, data in enumerate(testloader, 0):
            inputs, targets = data
            inputs = inputs.unsqueeze(-1)
            inputs = inputs.to(device)
            targets = targets.to(device)  # targets must be on the same device as outputs

            loss, outputs = model(inputs, targets)
            outputs = outputs.to(device)

            # Update total and correct counts
            total += targets.size(0)
            correct += (outputs == targets).sum().item()

    # Print accuracy
    print('Accuracy for fold %d: %.2f %%' % (fold, 100.0 * correct / total))
    print('--------------------------------')
    results[fold] = 100.0 * correct / total

# Print fold results
print(f'K-FOLD CROSS VALIDATION RESULTS FOR {k_folds} FOLDS')
print('--------------------------------')
acc_sum = 0.0  # renamed: `sum` would shadow the built-in used above
for key, value in results.items():
    print(f'Fold {key}: {value} %')
    acc_sum += value
print(f'Average: {acc_sum / len(results)} %')
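For clarity, this is roughly the plot I am after. A minimal sketch, assuming I run the evaluation loop at the end of every epoch and collect per-epoch lists train_losses, test_losses, train_accs, test_accs (these names are mine, they do not exist in the code above):

import matplotlib.pyplot as plt

# Hypothetical per-epoch histories, appended once per epoch, e.g.
#   train_losses.append(epoch_loss / len(trainloader))
#   test_losses.append(test_epoch_loss / len(testloader))
epochs = range(1, len(train_losses) + 1)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(epochs, train_losses, label='train loss')
ax1.plot(epochs, test_losses, label='test loss')
ax1.set_xlabel('epoch'); ax1.set_ylabel('loss'); ax1.legend()

ax2.plot(epochs, train_accs, label='train acc')
ax2.plot(epochs, test_accs, label='test acc')
ax2.set_xlabel('epoch'); ax2.set_ylabel('accuracy'); ax2.legend()

plt.tight_layout()
plt.show()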
Upvotes: 0
Views: 6229
Reputation: 1290
You could use TensorBoard, which is built especially for that. Here is the PyTorch documentation: https://pytorch.org/docs/stable/tensorboard.html
So in your case, where you are printing the result, you can just do:
writer.add_scalar('accuracy/train', torch.sum(outputs == targets) / float(outputs.shape[0]), n_iter)
EDIT: adding a small example that you can follow.
Let's say that you are training a model:
from time import strftime

import torch
from torch.utils.tensorboard import SummaryWriter

model_name = 'network'
log_name = '{}_{}'.format(model_name, strftime('%Y%m%d_%H%M%S'))
writer = SummaryWriter('logs/{}'.format(log_name))

net = Model()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)

for epoch in range(num_epochs):
    losses = []
    correct_values = 0.0
    for i, (inputs, labels) in enumerate(trainloader):
        inputs = inputs.float()   # Variable() is deprecated; plain tensors work
        labels = labels.float()
        outputs = net(inputs)
        optimizer.zero_grad()
        loss = criterion(outputs, labels)
        losses.append(loss.item())  # .item() so the graph is not kept alive
        loss.backward()
        optimizer.step()
        correct_values += (outputs == labels).float().sum().item()
    accuracy = 100 * correct_values / len(training_set)
    avg_loss = sum(losses) / len(losses)  # mean loss over the epoch's batches
    writer.add_scalar('loss/train', avg_loss, epoch)
    writer.add_scalar('acc/train', accuracy, epoch)
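To also get the test curves, run your evaluation loop at the end of each epoch and log under matching tags. A minimal sketch, assuming test_avg_loss and test_accuracy are computed by your existing evaluation code (those names are placeholders):

    # Still inside the epoch loop, after evaluating on the test loader:
    writer.add_scalar('loss/test', test_avg_loss, epoch)
    writer.add_scalar('acc/test', test_accuracy, epoch)

Then launch TensorBoard with tensorboard --logdir=logs and you will see the train and test scalars plotted against the epoch number, grouped by their loss/ and acc/ prefixes.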
Upvotes: 4