Logistic Regression
Code address: https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/logistic_regression/main.py
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
Define the hyper-parameters and load the data.
# Hyper Parameters
input_size = 784
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# MNIST Dataset (Images and Labels)
train_dataset = dsets.MNIST(root='./data',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='./data',
                           train=False,
                           transform=transforms.ToTensor())

# Dataset Loader (Input pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
Here torchvision.datasets contains a number of common datasets; the prototype of dsets.MNIST is

class torchvision.datasets.MNIST(root, train=True, transform=None, target_transform=None, download=False)

where root is the path to the MNIST dataset (the directory holding processed/training.pt and processed/test.pt), train is a flag that selects the training set or the test set, transform is the image transformation (or composition of transformations) to apply, and download controls whether to download the dataset from the web.
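As a quick sanity check (a sketch, not part of the original script), you can index the dataset directly and pull one batch from the loader to confirm the shapes:

# Each dataset element is an (image, label) pair; with transforms.ToTensor()
# the image is a FloatTensor of shape [1, 28, 28] with values scaled to [0, 1].
print(len(train_dataset))            # 60000 training examples
image, label = train_dataset[0]
print(image.size(), label)           # torch.Size([1, 28, 28]) and the digit label
# The loader stacks batch_size examples into a single tensor per batch.
images, labels = next(iter(train_loader))
print(images.size(), labels.size())  # torch.Size([100, 1, 28, 28]) torch.Size([100])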
Define the model with only one linear layer.
# Model
class LogisticRegression(nn.Module):
    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        out = self.linear(x)
        return out

model = LogisticRegression(input_size, num_classes)
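Before training, a dummy forward pass (a sketch, assuming the definitions above) confirms that the model maps flattened 28*28 images to one score per class:

# Feed a random batch of flattened images through the untrained model.
dummy = Variable(torch.randn(batch_size, input_size))
print(model(dummy).size())  # torch.Size([100, 10]): one logit per class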
Define the loss and the optimization algorithm; for the underlying principles, see the previous post: http://blog.csdn.net/q295684174/article/details/79014451.
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
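The comment "Softmax is internally computed" means that nn.CrossEntropyLoss applies log-softmax to the raw linear outputs and then takes the negative log-likelihood, which is why the model itself needs no softmax layer. A minimal check of this equivalence (a sketch using random logits, not part of the original script; the dim argument of nn.LogSoftmax and the loss_a.data[0] access assume the same PyTorch version the tutorial uses):

# nn.CrossEntropyLoss on logits == nn.NLLLoss on log-softmaxed logits
logits = Variable(torch.randn(3, num_classes))    # 3 samples, 10 raw class scores each
targets = Variable(torch.LongTensor([1, 0, 4]))   # arbitrary target classes
loss_a = nn.CrossEntropyLoss()(logits, targets)
loss_b = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
print(loss_a.data[0], loss_b.data[0])             # the two values match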
Start training.
# Training the Model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images.view(-1, 28*28))
        labels = Variable(labels)

        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
                  % (epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.data[0]))
The prototype of images.view is

view(*args) → Tensor

It returns a new tensor with the same data as the original but a different shape, given by args. The tensor must be contiguous in memory for view to be usable. When one of the dimensions is -1, that dimension is inferred from the other dimensions and the total number of elements.
Example
>>> x = torch.randn(4, 4)
>>> x.size()
torch.Size([4, 4])
>>> y = x.view(16)
>>> y.size()
torch.Size([16])
>>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
>>> z.size()
torch.Size([2, 8])
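Since view requires contiguous memory, a tensor whose layout has been changed without copying (a transpose, for example) must be made contiguous first. A sketch of that failure mode:

>>> t = torch.randn(4, 4).t()    # transposing gives a non-contiguous tensor
>>> t.view(16)                   # raises a RuntimeError: the tensor is not contiguous
>>> t.contiguous().view(16)      # copy into contiguous memory first, then view works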
Test.
# Test the Model
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images.view(-1, 28*28))
    outputs = model(images)
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()

print('Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
The final accuracy is about 82%.