# Python/NetForStatistics.py

import torch
import torchvision
import torchvision.transforms as transforms

import torch.optim as optim

import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self, dev=None):
        super().__init__()
        # LeNet-style network for CIFAR-10: 3-channel 32x32 inputs, 10 classes.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # 32x32 -> conv1 -> 28x28 -> pool -> 14x14 -> conv2 -> 10x10 -> pool -> 5x5,
        # hence 16 * 5 * 5 flattened features.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

        # Loss and optimizer are attached to the module for convenience.
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.parameters(), lr=0.001, momentum=0.9)
        # An explicit dev argument (e.g. 'cpu') overrides automatic selection;
        # otherwise use CUDA when available.
        if dev:
            self.device = torch.device(dev)
        else:
            self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        print("Device:", self.device)
        self.to(self.device)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # (N, 3, 32, 32) -> (N, 6, 14, 14)
        x = self.pool(F.relu(self.conv2(x)))  # -> (N, 16, 5, 5)
        x = torch.flatten(x, 1)  # flatten all dimensions except batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # raw logits; CrossEntropyLoss expects unnormalised scores
        return x


def getDataset():

    # The transform converts images to tensors and normalises each channel
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    batch_size = 4

    # We use standard datasets provided by torchvision, in this case CIFAR10
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                             shuffle=False, num_workers=2)
    return (trainloader, testloader)
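

# A small optional sanity check (illustrative addition; the helper name
# _check_batch is ours, not part of the original file): pull one batch from a
# loader and confirm the expected CIFAR-10 shapes, i.e. images of shape
# (batch_size, 3, 32, 32) with one integer class label per image.
def _check_batch(dataloader):
    images, labels = next(iter(dataloader))
    print("images:", tuple(images.shape), "labels:", tuple(labels.shape))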


def evalmodel(net, dataloader):
    """Return the mean loss of net over dataloader, without tracking gradients."""
    tloss = 0.0
    net.eval()
    with torch.no_grad():  # no gradients needed for evaluation
        for data in dataloader:
            inputs, labels = (data[0].to(net.device), data[1].to(net.device))

            outputs = net(inputs)
            loss = net.criterion(outputs, labels)

            tloss += loss.item()
    return tloss / len(dataloader)


def trainmodel(net, dataloader):
    """Train net for one epoch over dataloader and return the mean loss."""
    running_loss = 0.0
    tloss = 0.0
    net.train()
    for i, data in enumerate(dataloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = (data[0].to(net.device), data[1].to(net.device))

        # zero the parameter gradients
        net.optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = net.criterion(outputs, labels)
        loss.backward()
        net.optimizer.step()

        # print statistics
        running_loss += loss.item()
        tloss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print(f'[Training, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
            running_loss = 0.0
    return tloss / len(dataloader)
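

# A minimal usage sketch wiring the pieces above together. The __main__ guard
# also keeps the num_workers=2 DataLoaders safe on platforms that spawn worker
# processes; the epoch count is illustrative, not taken from the original file.
if __name__ == '__main__':
    trainloader, testloader = getDataset()
    net = Net()
    for epoch in range(2):
        train_loss = trainmodel(net, trainloader)
        test_loss = evalmodel(net, testloader)
        print(f'Epoch {epoch + 1}: train loss {train_loss:.3f}, '
              f'test loss {test_loss:.3f}')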