#ml
ML & PyTorch
Hello World: Linear Regression
Based on this tutorial.
######### Loading and normalizing CIFAR10 #########
import torch
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
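Quick sanity check (not part of the tutorial, just a sketch): pull one batch from the loader and confirm the shapes, since the model below flattens each 3x32x32 image.
# Pull one mini-batch and check tensor shapes
images, labels = next(iter(trainloader))
print(images.shape)  # torch.Size([4, 3, 32, 32]) -> 3 * 32 * 32 = 3072 values per image
print(labels.shape)  # torch.Size([4]), integer class indices 0-9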
######### Define a Linear Regression Model #########
import torch.nn as nn
import torch.nn.functional as F
class LinearRegressionModel(nn.Module):
    def __init__(self):
        super(LinearRegressionModel, self).__init__()
        # Single linear layer: flattened 3x32x32 image -> one score per CIFAR10 class
        self.linearModel = nn.Linear(3 * 32 * 32, 10)

    def forward(self, x):
        # Flatten each image; the flattened size must match the layer input (3 * 32 * 32)
        return self.linearModel(x.view(-1, 3 * 32 * 32))

net = LinearRegressionModel()
HAS_GPU = torch.cuda.is_available()
if HAS_GPU: net = net.cuda()
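Another quick check (sketch, assuming the 10-way output above): a CIFAR10-shaped batch should come out as one logit per class.
# Feed a random CIFAR10-shaped batch through the untrained model
dummy = torch.randn(4, 3, 32, 32)
if HAS_GPU: dummy = dummy.cuda()
print(net(dummy).shape)  # torch.Size([4, 10])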
######### Define a Loss function and optimizer ##########
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
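Note: nn.CrossEntropyLoss expects raw logits of shape (N, num_classes) plus integer labels of shape (N,), and applies log-softmax internally, so the model should not add its own softmax. Tiny standalone example:
# CrossEntropyLoss on dummy data: 4 samples, 10 classes
dummy_logits = torch.randn(4, 10)
dummy_labels = torch.tensor([3, 8, 8, 0])
print(criterion(dummy_logits, dummy_labels))  # around ln(10) ≈ 2.3 for random logits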
########## Run test first to get baseline ##########
net.eval()  # evaluation mode; gradients are also skipped below via torch.no_grad()
running_accuracy = 0
with torch.no_grad():
    for i, (inputs, labels) in enumerate(testloader, 0):
        if HAS_GPU: inputs = inputs.cuda()
        outputs = net(inputs).cpu().numpy()
        labels = labels.numpy()
        running_accuracy += (outputs.argmax(1) == labels).mean()
running_accuracy /= len(testloader)
print("accuracy", running_accuracy)  # roughly 0.1 (chance level) before training
########## Train the network ##########
net.train()
for epoch in range(2):  # loop over the dataset multiple times
    training_count = 0
    training_loss = 0
    training_accuracy = 0
    for i, (inputs, labels) in enumerate(trainloader, 0):
        if HAS_GPU:
            inputs = inputs.cuda()
            labels = labels.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # accumulate statistics
        training_count += 1
        training_loss += loss.item()
        training_accuracy += (outputs.cpu().detach().numpy().argmax(1)
                              == labels.cpu().detach().numpy()).mean()
    print('loss: %.3f accuracy: %.3f' % (training_loss / training_count,
                                         training_accuracy / training_count))
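Possible follow-up once training finishes (not part of the tutorial snippet; the path is just an example): save the learned weights so the test pass can be rerun without retraining.
# Save and reload the trained weights (example path)
PATH = './cifar10_linear.pth'
torch.save(net.state_dict(), PATH)
net.load_state_dict(torch.load(PATH))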
########## Run test again at the end ##########
net.eval()  # evaluation mode; no gradients needed for testing
running_accuracy = 0
with torch.no_grad():
    for i, (inputs, labels) in enumerate(testloader, 0):
        if HAS_GPU: inputs = inputs.cuda()
        outputs = net(inputs).cpu().numpy()
        labels = labels.numpy()
        running_accuracy += (outputs.argmax(1) == labels).mean()
running_accuracy /= len(testloader)
print("accuracy", running_accuracy)