
A Comparative Analysis of Convolutional Networks and Fully Connected Networks

The Beauty of Algorithms and Programming · 2023-04-10 · Published in Sichuan

1 Problem

A comparison of average pooling and max pooling.
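
To make the two operations concrete before the experiment, here is a minimal sketch (added for illustration, not part of the original method): max pooling keeps the largest value in each window, while average pooling keeps the mean.

import torch
import torch.nn.functional as F

x = torch.tensor([[[[1., 2.],
                    [3., 4.]]]])           # one 2x2 feature map, shape [N, C, H, W]
print(F.max_pool2d(x, kernel_size=2))      # tensor([[[[4.0]]]]) -- the maximum
print(F.avg_pool2d(x, kernel_size=2))      # tensor([[[[2.5]]]]) -- the mean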

2 Method

Average pooling, trained for 10 epochs:

from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import torch
'''
test() and val() have been merged into a single test() function
'''
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
# print(device)
class MyNet(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        # Four convolutional layers, two max-pooling layers,
        # one global average-pooling layer, and two fully connected layers
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)   # [-,16,28,28]
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1)  # [-,32,28,28]
        self.max_pool_1 = nn.MaxPool2d(2)                                                            # [-,32,14,14]
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)  # [-,64,14,14]
        self.max_pool_2 = nn.MaxPool2d(2)                                                            # [-,64,7,7]
        self.conv4 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1) # [-,128,7,7]
        self.ada_pool = nn.AdaptiveAvgPool2d(1)                                                      # [-,128,1,1]
        self.fc1 = nn.Linear(in_features=128 * 1 * 1, out_features=512)
        self.fc2 = nn.Linear(in_features=512, out_features=10)
    def forward(self, x):
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        x = self.max_pool_1(x)   # pooling needs no activation of its own
        x = torch.relu(self.conv3(x))
        x = self.max_pool_2(x)
        x = torch.relu(self.conv4(x))
        x = self.ada_pool(x)
        x = torch.flatten(x, 1)  # [B,C,H,W] -> [B,C*H*W]; start_dim=1 keeps the batch dimension
        x = torch.relu(self.fc1(x))
        out = self.fc2(x)        # no ReLU here: CrossEntropyLoss expects raw logits
        return out
# loss_list records the average loss of each epoch
def train(dataloader, net, loss_fn, optimizer, epoch):
    size = len(dataloader.dataset)
    correct = 0
    epoch_loss = 0.0
    batch_num = len(dataloader)
    net.train()
    # train the network one batch at a time
    for batch_idx, (X, y) in enumerate(dataloader):
        pred = net(X)
        # measure the loss between y and y_hat
        # y: [128], pred: [128, 10] -> CrossEntropyLoss
        loss = loss_fn(pred, y)
        # use the loss to update all network parameters, back to front
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        correct += (pred.argmax(1) == y).type(torch.float).sum().item()
        if batch_idx % 100 == 0:
            # f-string {value:format_spec}
            print(f'[{batch_idx + 1:>5d}/{batch_num:>5d}], loss: {loss.item()}')
    avg_loss = epoch_loss / batch_num
    avg_accuracy = correct / size
    # loss_list.append(avg_loss)
    return avg_accuracy, avg_loss
def test(dataloader, net, loss_fn):
    size = len(dataloader.dataset)
    batch_num = len(dataloader)
    correct = 0
    losses = 0
    net.eval()
    with torch.no_grad():
        for X, y in dataloader:  # iterate over the loader passed in, not a global
            pred = net(X)
            correct += (pred.argmax(1) == y).type(torch.int).sum().item()
            loss = loss_fn(pred, y)
            losses += loss.item()
    accuracy = correct / size
    avg_loss = losses / batch_num
    return accuracy, avg_loss
if __name__ == "__main__":
    bat = 128
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(0.1307, 0.3081)  # (mean, std) of MNIST
    ])  # Compose chains the two transforms into one
    train_ds = datasets.MNIST(root='data', download=True, train=True,
                              transform=transform)
    train_ds, val_ds = torch.utils.data.random_split(train_ds, [50000, 10000])
    test_ds = datasets.MNIST(root='data', download=True, train=False,
                             transform=transform)
    train_loader = DataLoader(dataset=train_ds, batch_size=bat, shuffle=True)
    val_loader = DataLoader(dataset=val_ds, batch_size=bat)
    test_loader = DataLoader(dataset=test_ds, batch_size=bat)
    net = MyNet()
    optimizer = torch.optim.SGD(net.parameters(), 0.01)
    # loss function: measures the difference between y and y_hat
    loss_fn = nn.CrossEntropyLoss()
    train_accuracy_list = []
    train_loss_list = []
    val_accuracy_list = []
    val_loss_list = []
    # keep the best model over the 10 epochs,
    # judged by validation-set accuracy
    best_acc = 0
    for epoch in range(10):
        print(f'Epoch: {epoch + 1}')
        train_accuracy, train_loss = train(train_loader, net, loss_fn, optimizer, epoch)
        train_accuracy_list.append(train_accuracy)
        train_loss_list.append(train_loss)
        print(f'Train Acc: {train_accuracy}, Train loss: {train_loss}')
        val_accuracy, val_loss = test(val_loader, net, loss_fn)
        val_accuracy_list.append(val_accuracy)
        val_loss_list.append(val_loss)
        print(f'Val Acc: {val_accuracy}, Val loss: {val_loss}')
        if val_accuracy > best_acc:
            best_acc = val_accuracy
            # save the best model so far (its parameters only, i.e. the weights and biases)
            torch.save(net.state_dict(), 'model_best.pth')
    net.load_state_dict(torch.load('model_best.pth'))
    accuracy, _ = test(test_loader, net, loss_fn)
    print(f'Accuracy on the test set: {accuracy}')
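
The article shows only the results of the max-pooling run. Presumably the comparison swaps the final global average pooling for global max pooling; a minimal sketch of that change (an assumption, not code from the article):

# Max-pooling variant of MyNet.__init__ (hypothetical: only this layer changes)
self.ada_pool = nn.AdaptiveMaxPool2d(1)  # replaces nn.AdaptiveAvgPool2d(1)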

Max pooling (training results figure)

Average pooling (training results figure)

3 Conclusion

The difference: in the forward pass, max pooling computes the maximum within each pooling region and records the position of that maximum in the input, so that during backpropagation the gradient can be routed to exactly that position. In the backward pass, the gradient is passed directly to that one pixel of the previous layer, while the other pixels in the region receive no gradient, i.e. zero. Mean pooling, by contrast, distributes the gradient evenly over every pixel of the region. So what distinguishes the max pooling operation from the mean pooling operation is that it must record which pixel held the maximum during pooling; this "max id" variable stores the position of the maximum, because it is needed in the backward pass.
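
A minimal PyTorch sketch (not from the original article) makes this behavior visible: backpropagating through a 2x2 max pool sends the entire gradient to the "max id" position, while the average pool spreads it evenly.

import torch
import torch.nn.functional as F

x = torch.tensor([[[[1., 2.],
                    [3., 4.]]]], requires_grad=True)  # shape [1, 1, 2, 2]

F.max_pool2d(x, kernel_size=2).sum().backward()
print(x.grad)   # tensor([[[[0., 0.], [0., 1.]]]]) -- all gradient at the max position

x.grad = None   # clear the gradient before the second pass
F.avg_pool2d(x, kernel_size=2).sum().backward()
print(x.grad)   # tensor([[[[0.25, 0.25], [0.25, 0.25]]]]) -- split evenly over the region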
