import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as datasets
from torchvision import transforms
import torchvision.utils
from tqdm import tqdm
import matplotlib.pyplot as plt
mnist = datasets.MNIST(root="./data", train=True, download=True, transform=transforms.ToTensor())
The PyTorch DataLoader class is an efficient implementation of an iterator that can perform useful preprocessing and return batches of elements. Here, we use its ability to batch and shuffle data, but DataLoaders are capable of much more.
Note that each time we iterate over a DataLoader, it starts again from the beginning.
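To see this restart behavior concretely, here is a minimal, self-contained sketch on a toy dataset (the toy tensor and batch size are illustrative, not part of the MNIST example):
# Toy example: every `for` loop over a DataLoader starts again from the first batch.
toy_data = torch.utils.data.TensorDataset(torch.arange(6))
toy_loader = torch.utils.data.DataLoader(toy_data, batch_size=2, shuffle=False)
print([batch[0].tolist() for batch in toy_loader])  # [[0, 1], [2, 3], [4, 5]]
print([batch[0].tolist() for batch in toy_loader])  # the same batches again, from the beginning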
Below we use torchvision.utils.make_grid() to show a sample batch of inputs.
data_loader = torch.utils.data.DataLoader(mnist, batch_size=64, shuffle=True)
# Show one batch of images. Each batch of images has shape [batch_size, 1, 28, 28],
# where 1 is the "channels" dimension of the image.
for images, labels in data_loader:
    grid_img = torchvision.utils.make_grid(images)
    plt.imshow(grid_img.permute(1, 2, 0))
    plt.title("A single batch of images")
    break
Here we define a simple 1-hidden-layer neural network for classification on MNIST. It takes a parameter that determines the size of the hidden layer.
class MNISTNetwork(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.linear_0 = nn.Linear(784, hidden_size)
        self.linear_1 = nn.Linear(hidden_size, 10)

    def forward(self, inputs):
        x = self.linear_0(inputs)
        x = torch.sigmoid(x)
        return self.linear_1(x)
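As a quick sanity check, we can pass a dummy batch of flattened images through the network and confirm that it produces one logit per digit class (the hidden size and batch of zeros below are arbitrary choices for illustration):
net = MNISTNetwork(16)          # hidden size chosen arbitrarily for this check
dummy = torch.zeros(8, 784)     # a fake batch of 8 flattened 28x28 images
print(net(dummy).shape)         # torch.Size([8, 10])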
We will consider three networks: a small one (hidden size 1), a large one (hidden size 64), and a second large one that keeps its default random initialization; the first two are zero-initialized below.
In the code below, we use several important PyTorch methods that you'll want to be familiar with (a short standalone sketch exercising them follows this list):
torch.nn.Module.parameters(): Returns an iterator over module parameters (e.g., for passing to an optimizer that will update those parameters).
torch.Tensor.view(): Returns a view into the original Tensor. The result of this method shares the same underlying data as the input Tensor. This avoids copying the data, which means it can be more efficient, but it also means that when the original Tensor is modified, so is the view!
torch.Tensor.item(): Returns the value of a single-element Tensor as a standard Python number. This only works for tensors with one element; for other cases, see torch.Tensor.tolist().
torch.Tensor.backward(): Computes the gradients of the current tensor with respect to the graph leaves. Gradients are only computed for tensors with requires_grad set to True, which is the default for module parameters (though not for plain tensors). After calling this, a Tensor's .grad attribute is updated with the current gradients; these are used, for example, when calling the .step() method of an optimizer.
torch.optim.Optimizer.zero_grad(): Sets the gradients of all optimized parameters to zero. This should be done before each optimization step (i.e., before each training batch); if .zero_grad() is not called, gradients accumulate (add up) across iterations.
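Here is a minimal, self-contained sketch (a toy linear model with made-up data, separate from the MNIST networks) that exercises all of the methods above in a single optimization step:
toy_model = nn.Linear(4, 2)
toy_opt = optim.SGD(toy_model.parameters(), lr=0.1)  # .parameters() hands the weights to the optimizer
x = torch.randn(3, 2, 2).view(-1, 4)                 # .view() reshapes [3, 2, 2] -> [3, 4] without copying
targets = torch.tensor([0, 1, 0])
toy_opt.zero_grad()                                  # clear any previously accumulated gradients
loss = F.cross_entropy(toy_model(x), targets)
loss.backward()                                      # fills in .grad for every parameter of toy_model
toy_opt.step()                                       # uses those .grad values to update the parameters
print(loss.item())                                   # .item() turns the one-element loss tensor into a float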
small_net = MNISTNetwork(1)
large_net = MNISTNetwork(64)
large_net_rand = MNISTNetwork(64)
# Initialize all weights and biases of small_net and large_net to zero;
# large_net_rand keeps its default random initialization.
for p1, p2 in zip(small_net.parameters(), large_net.parameters()):
    p1.data = torch.zeros_like(p1.data)
    p2.data = torch.zeros_like(p2.data)
We will train all three networks simultaneously on the same batches using the same learning rate. After each epoch, we print the average loss of each network over that epoch.
epochs = 32
optimizer_small = optim.Adam(small_net.parameters(), lr=5e-3)
optimizer_large = optim.Adam(large_net.parameters(), lr=5e-3)
optimizer_large_rand = optim.Adam(large_net_rand.parameters(), lr=5e-3)
for i in range(epochs):
    loss_small_epoch = 0.
    loss_large_epoch = 0.
    loss_large_rand_epoch = 0.
    for batch in tqdm(data_loader):
        images, labels = batch
        images = images.view(-1, 784)  # flatten [batch_size, 1, 28, 28] -> [batch_size, 784]
        optimizer_small.zero_grad()
        optimizer_large.zero_grad()
        optimizer_large_rand.zero_grad()
        y_small = small_net(images)
        y_large = large_net(images)
        y_large_rand = large_net_rand(images)
        loss_small = F.cross_entropy(y_small, labels)
        loss_large = F.cross_entropy(y_large, labels)
        loss_large_rand = F.cross_entropy(y_large_rand, labels)
        loss_small_epoch += loss_small.item()
        loss_large_epoch += loss_large.item()
        loss_large_rand_epoch += loss_large_rand.item()
        loss_small.backward()
        loss_large.backward()
        loss_large_rand.backward()
        optimizer_small.step()
        optimizer_large.step()
        optimizer_large_rand.step()
    print("Small Loss:", loss_small_epoch / len(data_loader))
    print("Large Loss:", loss_large_epoch / len(data_loader))
    print("Large rand Loss:", loss_large_rand_epoch / len(data_loader))
# Inspect the trained weights and biases of large_net, which was zero-initialized.
W_0 = large_net.linear_0.weight
b_0 = large_net.linear_0.bias
W_1 = large_net.linear_1.weight
b_1 = large_net.linear_1.bias
print("W_0 => All weights equal for each hidden unit:", (W_0[0, :].unsqueeze(0) == W_0).all().item())
print("Example of weights:")
print(W_0[:, 256])
print("W_1 => All weights equal for each hidden unit:", (W_1[:, 0].unsqueeze(-1) == W_1).all().item())
print("Weights:")
print(W_1[8])
print("b_0 => All biases equal for each hidden unit:", (b_0[0] == b_0).all().item())
print("Bias:")
print(b_0)
print("b_1 => All biases equal for each hidden unit:", (b_1[0] == b_1).all().item())
print("Bias:")
print(b_1)
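These equalities are a consequence of symmetry: when every weight starts at zero, all hidden units compute the same value and receive the same gradient, so no update can ever make them differ. Here is a minimal sketch of that claim, reusing the MNISTNetwork class and data_loader from above (the fresh network and single batch are just for illustration):
sym_net = MNISTNetwork(64)
for p in sym_net.parameters():
    p.data = torch.zeros_like(p.data)                # zero-initialize, as we did for large_net
images, labels = next(iter(data_loader))
loss = F.cross_entropy(sym_net(images.view(-1, 784)), labels)
loss.backward()
g0 = sym_net.linear_0.weight.grad
g1 = sym_net.linear_1.weight.grad
print("linear_0 gradients identical across hidden units:", (g0[0].unsqueeze(0) == g0).all().item())
print("linear_1 gradients identical across hidden units:", (g1[:, 0].unsqueeze(-1) == g1).all().item())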
Below is an implementation of the network from the section handout. We use torchinfo.summary() to view the size of the data as it flows through the network; additionally, we print the shapes of the weights and biases of the trainable layers. Note that this network is just for demonstration and may not work well in practice.
Note: this section uses the torchinfo package; see the GitHub repo for installation instructions, or run one of the following commands:
install via conda:
conda install -c conda-forge torchinfo
install via pip:
pip install torchinfo
from torchinfo import summary
class DemoNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        # Argument order: Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        # MaxPool2d(kernel_size, stride, padding).
        self.conv1 = nn.Conv2d(3, 16, 3, 1, 1)
        self.max1 = nn.MaxPool2d(2, 2, 0)
        self.conv2 = nn.Conv2d(16, 32, 3, 1, 0)
        self.max2 = nn.MaxPool2d(2, 2, 1)
        self.conv3 = nn.Conv2d(32, 8, 1, 1, 0)
        self.conv4 = nn.Conv2d(8, 4, 5, 1, 0)
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(576, 10)

    @property
    def trainable_layers(self):
        """A utility property to easily access a list of all model layers."""
        return [self.conv1, self.conv2, self.conv3, self.conv4, self.linear1]

    def forward(self, inputs):
        """Implements the forward pass (shape comments assume the 64x64 input used with summary() below)."""
        x = self.conv1(inputs)   # [N, 3, 64, 64]  -> [N, 16, 64, 64]
        x = self.max1(x)         # [N, 16, 64, 64] -> [N, 16, 32, 32]
        x = self.conv2(x)        # [N, 16, 32, 32] -> [N, 32, 30, 30]
        x = self.max2(x)         # [N, 32, 30, 30] -> [N, 32, 16, 16]
        x = self.conv3(x)        # [N, 32, 16, 16] -> [N, 8, 16, 16]
        x = self.conv4(x)        # [N, 8, 16, 16]  -> [N, 4, 12, 12]
        x = self.flatten(x)      # [N, 4, 12, 12]  -> [N, 576]
        x = self.linear1(x)      # [N, 576]        -> [N, 10]
        return x

    def print_weight_shapes(self):
        """Utility function to print the shapes of weights in trainable layers."""
        for layer in self.trainable_layers:
            print(f"Weight shape: {layer.weight.shape}; Bias shape: {layer.bias.shape}")
demo = DemoNetwork()
batch_size = 64
summary(demo, input_size=(batch_size, 3, 64, 64))
demo.print_weight_shapes()
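If you want to trace those shapes without torchinfo, a minimal sketch is to push a dummy batch through the layers one at a time (the dummy input below is an illustrative assumption matching the 64x64 RGB input passed to summary() above). This works here because the layers are registered in __init__ in the same order they are applied in forward():
x = torch.zeros(1, 3, 64, 64)   # a dummy batch of one 64x64 RGB image
for name, layer in demo.named_children():
    x = layer(x)
    print(f"{name:>8}: {tuple(x.shape)}")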