# PyTorch model and training necessities
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Image datasets and image manipulation
import torchvision
import torchvision.transforms as transforms

# Image display
import matplotlib.pyplot as plt
import numpy as np

# PyTorch TensorBoard support
from torch.utils.tensorboard import SummaryWriter

# In case you are using an environment that has TensorFlow installed,
# such as Google Colab, uncomment the following code to avoid
# a bug with saving embeddings to your TensorBoard directory

# import tensorflow as tf
# import tensorboard as tb
# tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
# Gather datasets and prepare them for consumption.
# Normalize((0.5,), (0.5,)) maps single-channel pixel values from [0, 1]
# to [-1, 1] (mean 0.5, std 0.5).
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,))])
# Store separate training and validation splits in ./data.
# download=True fetches FashionMNIST on first run; train selects the split.
training_set = torchvision.datasets.FashionMNIST(
    './data',
    download=True,
    train=True,
    transform=transform)
validation_set = torchvision.datasets.FashionMNIST(
    './data',
    download=True,
    train=False,
    transform=transform)
# Default log_dir argument is "runs" - but it's good to be specific.
# torch.utils.tensorboard.SummaryWriter is imported above.
writer = SummaryWriter('runs/fashion_mnist_experiment_1')
# Write image data to the TensorBoard log dir.
# NOTE(review): img_grid is expected to be defined earlier in the file
# (presumably via torchvision.utils.make_grid) -- not visible in this chunk;
# confirm before running.
writer.add_image('Four Fashion-MNIST Images', img_grid)
writer.flush()
# To view, start TensorBoard on the command line with:
#   tensorboard --logdir=runs
# ...and open a browser tab to http://localhost:6006/
def forward(self, x):
    """Forward pass: two conv+pool stages, flatten, then three fully-connected layers.

    Args:
        x: input image batch; the flatten below assumes the conv/pool stack
           reduces each sample to 16 feature maps of 4x4 (e.g. 28x28
           single-channel input such as FashionMNIST).

    Returns:
        Raw class scores (logits) from the final linear layer.
    """
    x = self.pool(F.relu(self.conv1(x)))
    x = self.pool(F.relu(self.conv2(x)))
    x = x.view(-1, 16 * 4 * 4)  # flatten for the fully-connected layers
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = self.fc3(x)  # no activation: loss (e.g. CrossEntropyLoss) expects logits
    return x
print(len(validation_loader))

for epoch in range(1):  # loop over the dataset multiple times
    running_loss = 0.0

    for i, data in enumerate(training_loader, 0):
        # basic training loop
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 1000 == 999:    # Every 1000 mini-batches...
            print('Batch {}'.format(i + 1))
            # Check against the validation set
            running_vloss = 0.0

            # In evaluation mode some model specific operations can be
            # omitted, e.g. dropout layers.
            net.train(False)  # Switching to evaluation mode, eg. turning off regularisation
            for j, vdata in enumerate(validation_loader, 0):
                vinputs, vlabels = vdata
                voutputs = net(vinputs)
                vloss = criterion(voutputs, vlabels)
                running_vloss += vloss.item()
            net.train(True)  # Switching back to training mode, eg. turning on regularisation