def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    # Set the model to training mode - important for batch normalization and dropout layers
    # Unnecessary in this situation but added for best practices
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch % 100 == 0:
            loss, current = loss.item(), (batch + 1) * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
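train_loop assumes a loss function and an optimizer have already been constructed and passed in. A minimal setup sketch, assuming a cross-entropy criterion, plain SGD, and a hypothetical learning_rate value, with model referring to a previously defined nn.Module:

# Setup sketch; learning_rate is an assumed value, and model is assumed to be defined earlier
import torch
from torch import nn

learning_rate = 1e-3
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)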
def test_loop(dataloader, model, loss_fn):
    # Set the model to evaluation mode - important for batch normalization and dropout layers
    # Unnecessary in this situation but added for best practices
    model.eval()
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0
    # Evaluating the model with torch.no_grad() ensures that no gradients are computed during test mode
    # also serves to reduce unnecessary gradient computations and memory usage for tensors with requires_grad=True
    with torch.no_grad():
        for X, y in dataloader:
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    # Report average loss and accuracy over the whole test set
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
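Both loops are typically driven by an outer epoch loop that alternates training and evaluation. A sketch of how they might be wired together, assuming train_dataloader and test_dataloader exist and that model, loss_fn, and optimizer were defined as above (the epoch count is an assumed value):

# Driver sketch; epochs, train_dataloader, and test_dataloader are assumptions for illustration
epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")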