About Python: MNIST Handwritten Digit Recognition with PyTorch

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
import torchvision.utils
from torchvision import datasets, transforms
from torch.autograd import Variable
import torch.utils.data

Check whether a GPU is available: use the GPU if so, otherwise fall back to the CPU.

use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

Data transform: PyTorch operates on tensors under the hood, so every image used for training must be converted to a tensor (and here also normalized with mean 0.5 and std 0.5).

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
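
To see what this transform does to a single image, here is a minimal sketch (illustration only; the all-white dummy image is made up): ToTensor scales each 0-255 pixel into [0, 1], and Normalize([0.5], [0.5]) then maps it into [-1, 1] via (x - 0.5) / 0.5.

from PIL import Image

dummy = Image.fromarray(np.full((28, 28), 255, dtype=np.uint8))  # a hypothetical all-white 28x28 grayscale image
t = transform(dummy)
print(t.shape)                          # torch.Size([1, 28, 28])
print(t.min().item(), t.max().item())   # 1.0 1.0, since every white pixel maps to (1.0 - 0.5) / 0.5 = 1.0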

Download the datasets.

data_train = datasets.MNIST(root="./data/", transform=transform, train=True, download=True)
data_test = datasets.MNIST(root="./data/", transform=transform, train=False)

Load the datasets with a batch size of 64; shuffle=True means the samples are drawn in random order.

data_loader_train = torch.utils.data.DataLoader(dataset=data_train, batch_size=64, shuffle=True)
data_loader_test = torch.utils.data.DataLoader(dataset=data_test, batch_size=64, shuffle=True)
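
A quick way to confirm the batch layout (a small sanity check, not in the original post): each batch yielded by data_loader_train is a pair of tensors, images of shape [64, 1, 28, 28] and labels of shape [64].

images, labels = next(iter(data_loader_train))
print(images.shape)   # torch.Size([64, 1, 28, 28]): batch, channel, height, width
print(labels.shape)   # torch.Size([64])
print(labels[:8])     # the first eight digit labels in this batch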

Create the model, i.e. the network architecture.

class Model(nn.Module):

    def __init__(self):
        super(Model, self).__init__()
        # Convolutional block
        self.conv1 = nn.Sequential(
            # 1 input channel, 64 output channels, 3x3 kernel, stride 1, padding 1,
            # so the feature map keeps the same spatial size after the convolution
            nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            # Max pooling: channel count unchanged, spatial size halved
            # [(input - kernel_size) / stride + 1], i.e. 28x28 -> 14x14
            nn.MaxPool2d(stride=2, kernel_size=2)
        )
        # Fully connected block
        self.dense = nn.Sequential(
            nn.Linear(14*14*128, 1024),
            nn.ReLU(),
            # Randomly drop some units to reduce overfitting
            nn.Dropout(p=0.5),
            nn.Linear(1024, 10)
        )

    # With the layers defined, set up the forward pass
    def forward(self, x):
        # Apply the convolutional block
        x = self.conv1(x)
        # Flatten the features
        x = x.view(-1, 14*14*128)
        # Apply the fully connected block
        x = self.dense(x)
        return x
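
As a quick sanity check on the architecture (a sketch, not part of the original code; _m and _dummy are throwaway names): the two 3x3 convolutions with padding 1 keep the 28x28 size, the 2x2 max pool halves it to 14x14 with 128 channels, which matches the 14*14*128 inputs of the first linear layer, and the last layer outputs 10 class scores.

_m = Model()
_dummy = torch.randn(1, 1, 28, 28)   # one fake grayscale 28x28 image
print(_m(_dummy).shape)              # torch.Size([1, 10]), one score per digit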

Instantiate the class.

model = Model()

Specify the number of training epochs.

epochs = 5

Set the learning rate, i.e. the step size of gradient descent; a larger value converges faster, a smaller one more slowly.

learning_rate = 0.0001

Choose a parameter optimizer; Adam is used here.

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

Choose a loss function; cross-entropy is used here to measure how close the actual output is to the expected output.

criterion = nn.CrossEntropyLoss()
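
A small illustration with made-up numbers (a sketch, not from the original post): CrossEntropyLoss takes the raw, unnormalized scores from the network together with integer class labels and applies log-softmax internally, so the model needs no softmax layer of its own.

_logits = torch.tensor([[2.0, 0.5, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])  # fake scores for the 10 digits
_target = torch.tensor([0])                    # the true digit is 0
print(criterion(_logits, _target).item())      # a small loss, since class 0 already has the highest score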

Check whether the GPU should be used for training; if so, move the model and the loss function onto it.

if use_gpu:
    model = model.cuda()
    criterion = criterion.cuda()

Train over the data in batches with a for loop.

for epoch in range(epochs):
    # Define and initialize the running loss and number of correct predictions
    running_loss = 0
    running_correct = 0
    for data in data_loader_train:
        x_train, y_train = data
        # Move the batch to the chosen device (GPU if available, otherwise CPU)
        x_train = x_train.to(device)
        y_train = y_train.to(device)
        # Feed the preprocessed batch through the instantiated model to get the outputs
        outputs = model(x_train)
        _, pred = torch.max(outputs.data, 1)
        # Gradients must be cleared in every iteration so they do not accumulate
        optimizer.zero_grad()
        # Compute the chosen loss
        loss = criterion(outputs, y_train)
        # Backpropagate the loss
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Update the running loss
        running_loss += loss.item()
        # Update the number of correct predictions
        running_correct += torch.sum(pred == y_train.data)
    testing_correct = 0
    # After each epoch, check the accuracy on the test set
    for data in data_loader_test:
        x_test, y_test = data
        x_test, y_test = Variable(x_test), Variable(y_test)
        x_test = x_test.to(device)
        y_test = y_test.to(device)
        outputs = model(x_test)
        _, pred = torch.max(outputs.data, 1)
        testing_correct += torch.sum(pred == y_test.data)
    print("Loss is {}, Training Accuracy is {}%, Test Accuracy is {}%".format(
        running_loss/len(data_train), 100*running_correct/len(data_train), 100*testing_correct/len(data_test)))

Test the trained model.

Randomly load 4 handwritten digits.

data_loader_test = torch.utils.data.DataLoader(dataset=data_test, batch_size=4, shuffle=True)

About the function next: https://www.runoob.com/python…

About the function iter: https://www.runoob.com/python…

x_test, y_test = next(iter(data_loader_test))
inputs = Variable(x_test)
inputs = inputs.to(device)
pred = model(inputs)

_ holds the maximum values of the output, and pred holds the indices of those maxima, i.e. the predicted labels.
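
For instance, a toy example with made-up numbers (a sketch): along dimension 1, torch.max returns, for each row, the largest value together with the column index of that value, and the index is what serves as the predicted digit.

_scores = torch.tensor([[0.1, 2.3, 0.4],
                        [1.5, 0.2, 0.9]])
_vals, _idx = torch.max(_scores, 1)
print(_vals)   # tensor([2.3000, 1.5000]), the maximum value in each row
print(_idx)    # tensor([1, 0]), the index (predicted class) of each maximum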

_, pred = torch.max(pred, 1)
print('Predict Label is:', [i for i in pred.data])
print('Real Label is:', [i for i in y_test])
img = torchvision.utils.make_grid(x_test)
img = img.numpy().transpose(1, 2, 0)
std = [0.5]
mean = [0.5]
img = img*std+mean
plt.imshow(img)
plt.show()
