第P5周—运动鞋识别

2023-09-15 19:50:47

一、前期工作

1.导入数据集

数据集:工作台 - Heywhale.com

import torch
import matplotlib.pyplot as plt
from torchvision import transforms, datasets
import os, PIL, random, pathlib


# Dataset root: one sub-directory per class, directory name == class label.
data_dir = r'D:\P5-data\test'
data_dir = pathlib.Path(data_dir)

data_paths = list(data_dir.glob('*'))
# FIX: was `str(path).split("\\")[3]`, which hard-codes the directory depth
# and breaks the moment the dataset moves; Path.name is the last component
# regardless of where the dataset lives.
classNames = [path.name for path in data_paths]
print(classNames)

2.数据集划分

在Windows上,PyTorch的多进程数据加载有一些限制和问题,所以我们使用num_workers=0:在数据加载器创建时,将num_workers参数设置为0,这会禁用多进程数据加载。这是一个简单的解决方法,但可能会降低数据加载的速度。

torchvision.transforms.Compose()详解【Pytorch入门手册】_K同学啊的博客-CSDN博客

# Training preprocessing: resize to the network's fixed input size, convert
# to tensor, then normalize with the ImageNet statistics.
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],  # ImageNet per-channel means
        std=[0.229, 0.224, 0.225])   # ImageNet per-channel stds
])

# Evaluation preprocessing — kept separate so augmentation can later be added
# to the training pipeline without affecting evaluation.
test_transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
])

train_dataset = datasets.ImageFolder("D:/P5-data/train/", transform=train_transforms)
# FIX: the test set previously reused train_transforms, leaving test_transform
# defined but unused; apply the dedicated evaluation transform instead.
test_dataset  = datasets.ImageFolder("D:/P5-data/test/", transform=test_transform)

batch_size = 32

# num_workers=0 disables multiprocess loading (avoids known issues on Windows).
train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=0)
# NOTE(review): shuffling the test loader does not affect the metrics computed
# below, but it is unnecessary; kept to preserve the original behaviour.
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=0)

关于批次大小的选择

需要根据特定问题和数据集的特征进行调整。一般来说,常见的批次大小值为32、64、128等。选择批次大小时,建议进行实验并监测训练和验证性能,以找到适合特定任务的最佳值。 

3.检查数据

# Sanity-check one batch: confirm image tensor layout and label dtype.
images, labels = next(iter(test_dl))
print("Shape of images [N, C, H, W]: ", images.shape)
print("Shape of labels: ", labels.shape, labels.dtype)

 

 

二、构建神经网络

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    """CNN for the sneaker image classifier.

    Input:  (N, 3, 224, 224) float tensors.
    Output: (N, len(classNames)) raw logits (pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(Model, self).__init__()
        # Spatial sizes noted per stage assuming 224x224 inputs.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=5, padding=0),   # 224 -> 220
            nn.BatchNorm2d(12),
            nn.ReLU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(12, 12, kernel_size=5, padding=0),  # 220 -> 216
            nn.BatchNorm2d(12),
            nn.ReLU()
        )
        self.pool3 = nn.Sequential(
            nn.MaxPool2d(2)                               # 216 -> 108
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=5, padding=0),  # 108 -> 104
            nn.BatchNorm2d(24),
            nn.ReLU()
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(24, 24, kernel_size=5, padding=0),  # 104 -> 100
            nn.BatchNorm2d(24),
            nn.ReLU()
        )
        self.pool6 = nn.Sequential(
            nn.MaxPool2d(2)                               # 100 -> 50
        )
        self.dropout = nn.Sequential(
            nn.Dropout(0.5)
        )
        # Flattened feature map: 24 channels x 50 x 50 positions.
        self.fc = nn.Sequential(
            nn.Linear(24 * 50 * 50, len(classNames)),
            nn.Dropout(0.3)
        )

    def forward(self, x):
        """Run the feature extractor, flatten, then classify."""
        n = x.size(0)
        for stage in (self.conv1, self.conv2, self.pool3,
                      self.conv4, self.conv5, self.pool6, self.dropout):
            x = stage(x)
        x = x.view(n, -1)  # flatten to (batch, 24*50*50) for the linear head
        x = self.fc(x)
        # NOTE(review): this second dropout reproduces the original code; with
        # the Dropout inside `fc` it drops the logits twice during training —
        # confirm that is intentional.
        x = self.dropout(x)
        return x


# Prefer the GPU when one is available; everything below moves tensors here.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

# Instantiate the network and move its parameters to the chosen device.
model = Model().to(device)
print(model)

三、训练模型

1.动态学习率

def adjust_learning_rate(optimizer, epoch, start_lr):
    """Step decay: every 2 epochs, multiply start_lr by another factor 0.92."""
    new_lr = start_lr * 0.92 ** (epoch // 2)
    for group in optimizer.param_groups:
        group['lr'] = new_lr

learn_rate = 1e-4 # initial learning rate (decayed per epoch by adjust_learning_rate)
optimizer  = torch.optim.SGD(model.parameters(), lr=learn_rate)

2.训练函数

def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch; return (accuracy, mean per-batch loss)."""
    n_samples = len(dataloader.dataset)  # total images in the training set
    n_batches = len(dataloader)          # ceil(n_samples / batch_size)

    running_loss = 0.0
    n_correct = 0.0

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate metrics for the epoch.
        n_correct += (pred.argmax(1) == y).type(torch.float).sum().item()
        running_loss += loss.item()

    return n_correct / n_samples, running_loss / n_batches

3.测试函数

def test (dataloader, model, loss_fn):
    """Evaluate the model on `dataloader`; return (accuracy, mean batch loss)."""
    n_samples = len(dataloader.dataset)  # total images in the evaluation set
    n_batches = len(dataloader)          # ceil(n_samples / batch_size)

    running_loss = 0.0
    n_correct = 0.0

    # No gradients needed during evaluation — saves memory and compute.
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            preds = model(imgs)
            running_loss += loss_fn(preds, target).item()
            n_correct += (preds.argmax(1) == target).type(torch.float).sum().item()

    return n_correct / n_samples, running_loss / n_batches

4.正式训练

loss_fn = nn.CrossEntropyLoss()  # classification loss over raw logits
epochs = 50

# Per-epoch metric history, consumed by the plotting section below.
train_loss, train_acc = [], []
test_loss, test_acc = [], []

for epoch in range(epochs):
    # Apply this epoch's decayed learning rate before training.
    adjust_learning_rate(optimizer, epoch, learn_rate)

    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    # Current learning rate, for the progress line.
    lr = optimizer.param_groups[0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss,
                          epoch_test_acc * 100, epoch_test_loss, lr))
print('Done')

 

四、结果可视化

import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")             # silence matplotlib warnings
plt.rcParams['font.sans-serif'] = ['SimHei']  # font able to render Chinese labels
plt.rcParams['axes.unicode_minus'] = False    # keep minus signs readable with SimHei
plt.rcParams['figure.dpi'] = 100              # figure resolution

xs = range(epochs)

plt.figure(figsize=(12, 3))

# Left panel: accuracy curves.
plt.subplot(1, 2, 1)
plt.plot(xs, train_acc, label='Training Accuracy')
plt.plot(xs, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

# Right panel: loss curves.
plt.subplot(1, 2, 2)
plt.plot(xs, train_loss, label='Training Loss')
plt.plot(xs, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

 

五、完整代码

import torch
import matplotlib.pyplot as plt
from torchvision import transforms, datasets
import os, PIL, random, pathlib


# Dataset root: one sub-directory per class, directory name == class label.
data_dir = r'D:\P5-data\test'
data_dir = pathlib.Path(data_dir)

data_paths = list(data_dir.glob('*'))
# FIX: was `str(path).split("\\")[3]`, which hard-codes the directory depth
# and breaks the moment the dataset moves; Path.name is the last component
# regardless of where the dataset lives.
classNames = [path.name for path in data_paths]
print(classNames)

# Training preprocessing: resize to the network's fixed input size, convert
# to tensor, then normalize with the ImageNet statistics.
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],  # ImageNet per-channel means
        std=[0.229, 0.224, 0.225])   # ImageNet per-channel stds
])

# Evaluation preprocessing — kept separate so augmentation can later be added
# to the training pipeline without affecting evaluation.
test_transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
])

train_dataset = datasets.ImageFolder("D:/P5-data/train/", transform=train_transforms)
# FIX: the test set previously reused train_transforms, leaving test_transform
# defined but unused; apply the dedicated evaluation transform instead.
test_dataset  = datasets.ImageFolder("D:/P5-data/test/", transform=test_transform)

batch_size = 32

# num_workers=0 disables multiprocess loading (avoids known issues on Windows).
train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=0)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=0)

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    """CNN for the sneaker image classifier.

    Input:  (N, 3, 224, 224) float tensors.
    Output: (N, len(classNames)) raw logits (pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(Model, self).__init__()
        # Spatial sizes noted per stage assuming 224x224 inputs.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=5, padding=0),   # 224 -> 220
            nn.BatchNorm2d(12),
            nn.ReLU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(12, 12, kernel_size=5, padding=0),  # 220 -> 216
            nn.BatchNorm2d(12),
            nn.ReLU()
        )
        self.pool3 = nn.Sequential(
            nn.MaxPool2d(2)                               # 216 -> 108
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=5, padding=0),  # 108 -> 104
            nn.BatchNorm2d(24),
            nn.ReLU()
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(24, 24, kernel_size=5, padding=0),  # 104 -> 100
            nn.BatchNorm2d(24),
            nn.ReLU()
        )
        self.pool6 = nn.Sequential(
            nn.MaxPool2d(2)                               # 100 -> 50
        )
        self.dropout = nn.Sequential(
            nn.Dropout(0.5)
        )
        # Flattened feature map: 24 channels x 50 x 50 positions.
        self.fc = nn.Sequential(
            nn.Linear(24 * 50 * 50, len(classNames)),
            nn.Dropout(0.3)
        )

    def forward(self, x):
        """Run the feature extractor, flatten, then classify."""
        n = x.size(0)
        for stage in (self.conv1, self.conv2, self.pool3,
                      self.conv4, self.conv5, self.pool6, self.dropout):
            x = stage(x)
        x = x.view(n, -1)  # flatten to (batch, 24*50*50) for the linear head
        x = self.fc(x)
        # NOTE(review): this second dropout reproduces the original code; with
        # the Dropout inside `fc` it drops the logits twice during training —
        # confirm that is intentional.
        x = self.dropout(x)
        return x


# Prefer the GPU when one is available; everything below moves tensors here.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

# Instantiate the network and move its parameters to the chosen device.
model = Model().to(device)


def adjust_learning_rate(optimizer, epoch, start_lr):
    """Step decay: every 2 epochs, multiply start_lr by another factor 0.92."""
    new_lr = start_lr * 0.92 ** (epoch // 2)
    for group in optimizer.param_groups:
        group['lr'] = new_lr

learn_rate = 1e-4  # initial learning rate (decayed per epoch by adjust_learning_rate)
# NOTE(review): the walkthrough in section 3 used SGD; the full script uses
# Adam — confirm which optimizer the reported results correspond to.
optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)



def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch; return (accuracy, mean per-batch loss)."""
    n_samples = len(dataloader.dataset)  # total images in the training set
    n_batches = len(dataloader)          # ceil(n_samples / batch_size)

    running_loss = 0.0
    n_correct = 0.0

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate metrics for the epoch.
        n_correct += (pred.argmax(1) == y).type(torch.float).sum().item()
        running_loss += loss.item()

    return n_correct / n_samples, running_loss / n_batches

def test (dataloader, model, loss_fn):
    """Evaluate the model on `dataloader`; return (accuracy, mean batch loss)."""
    n_samples = len(dataloader.dataset)  # total images in the evaluation set
    n_batches = len(dataloader)          # ceil(n_samples / batch_size)

    running_loss = 0.0
    n_correct = 0.0

    # No gradients needed during evaluation — saves memory and compute.
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            preds = model(imgs)
            running_loss += loss_fn(preds, target).item()
            n_correct += (preds.argmax(1) == target).type(torch.float).sum().item()

    return n_correct / n_samples, running_loss / n_batches

loss_fn = nn.CrossEntropyLoss()  # classification loss over raw logits
epochs = 50

# Per-epoch metric history, consumed by the plotting section below.
train_loss, train_acc = [], []
test_loss, test_acc = [], []

for epoch in range(epochs):
    # Apply this epoch's decayed learning rate before training.
    adjust_learning_rate(optimizer, epoch, learn_rate)

    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    # Current learning rate, for the progress line.
    lr = optimizer.param_groups[0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss,
                          epoch_test_acc * 100, epoch_test_loss, lr))
print('Done')

import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")             # silence matplotlib warnings
plt.rcParams['font.sans-serif'] = ['SimHei']  # font able to render Chinese labels
plt.rcParams['axes.unicode_minus'] = False    # keep minus signs readable with SimHei
plt.rcParams['figure.dpi'] = 100              # figure resolution

xs = range(epochs)

plt.figure(figsize=(12, 3))

# Left panel: accuracy curves.
plt.subplot(1, 2, 1)
plt.plot(xs, train_acc, label='Training Accuracy')
plt.plot(xs, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

# Right panel: loss curves.
plt.subplot(1, 2, 2)
plt.plot(xs, train_loss, label='Training Loss')
plt.plot(xs, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

 

更多推荐

Linux开发工具之项目自动化构建工具-make/Makefile

make是一条命令,makefile是一个文件,两个搭配使用,完成项目自动化构建makefile带来的好处就是自动化编译,一旦写好,只需要一个make命令,整个工程完全自动编译,极大的提高了软件开发的效率下面来看一个实例:在Makefile文件里可以如上图一样g++/gcc编译代码一步到位,也可以如下图一样,一步一步拆

滨州ITSS认证流程,认证条件

ITSS认证流程,认证条件一、ITSS的意义ITSS认证——信息技术服务标准,是在工业和信息化部、国家标准化委的领导和支持下,由ITSS工作组研制的一套IT服务领域的标准库和一套提供IT服务的方法论。ITSS认证-信息技术服务标准是一套成体系和综合配套的信息技术服务标准库,全面规范了IT服务产品及其组成要素,用于指导实

css --- 让人上头的flex

用自己的理解记录下flex的知识点,部分文字描述是在学习其他作者分享的文章觉得很好而使用的,不是无脑搬运,是学习和借鉴!一、基本原理二、flex容器和项目三、轴线四、flex属性4.1属性汇总4.2属性分类4.3属性详解(1)flex-direction(2)flex-wrap(3)flex-flow(4)justif

VLAN的配置实例

基于端口的vlan划分以下配置为例:注意选择copper线,可以任意选择端口PC配置如下:LSW1配置如下[LSW1]vlan10[LSW1-vlan10]qu[LSW1]interfaceg0/0/9[LSW1-Gigabitethernet0/0/9]portlink-typeacces[LSW1-Gigabite

【脑机接口论文与代码】 基于自适应FBCCA的脑机接口控制机械臂

Brain-ControlledRoboticArmBasedonAdaptiveFBCCA基于自适应FBCCA的脑机接口控制机械臂论文下载:算法程序下载:摘要1项目介绍2方法2.1CCA算法2.2FBCCA算法2.3自适应FBCCA算法3数据获取4结果4.1脑地形图4.2频谱图4.3准确率5结论基于自适应FBCCA的

2023最新如何轻松升级、安装和试用Navicat Premium 16.2.10 教程详解

🌷🍁博主猫头虎(🐅🐾)带您GotoNewWorld✨🍁🦄博客首页——🐅🐾猫头虎的博客🎐🐳《面试题大全专栏》🦕文章图文并茂🦖生动形象🐅简单易学!欢迎大家来踩踩~🌺🌊《IDEA开发秘籍专栏》🐾学会IDEA常用操作,工作效率翻倍~💐🌊《100天精通Golang(基础入门篇)》🐅学会Gol

Linux学习第16天:Linux设备树下的LED驱动开发:举一反三 专注专心专业

Linux版本号4.1.15芯片I.MX6ULL大叔学Linux品人间百味思文短情长在开题之前,先说一下这次的题目,尤其是后面的“举一反三专注专心专业”到底想给大家传递什么信息。LED驱动开发,目前为止已经学了好几种方法,包括裸机开发、嵌入式LinuxLED驱动开发以及基于API函数的LED驱动开发,再加上今天要学习的

基于Java的养老院管理系统的设计与实现(亮点:多角色、登录验证码、留言反馈)

养老院管理系统一、前言二、我的优势2.1自己的网站2.2自己的小程序(小蔡coding)2.3有保障的售后2.4福利三、开发环境与技术3.1MySQL数据库3.2Vue前端技术3.3SpringBoot框架3.4微信小程序四、功能设计4.1主要功能描述五、系统实现5.1养老院老人功能5.1.1饮食喜好5.1.2体检结果

为何学linux及用处

目前企业使用的操作系统无非就是国产类的,windows和linux类。我们要提升自己的技能,需要学习这两款。我记得在大学时期,学习过windows以及linux,但当时觉得又不常用,就学的模棱两可。毕业之后,你会发现,其实这两种操作系统是很主流的。为什么学?下面就是一些工作中遇到的例子分享一下。我记得在企业中有次遇到数

jvm深入研究文档--整体概念

阿丹:精通JVM对于一个java工程师非常重要,要是深入了解了jvm就可以有效的面对下面的问题程序调优:JVM的配置和调优对于程序的运行有着至关重要的影响。不同的业务场景需要不同的JVM配置,比如设置不同的垃圾收集器、调整新生代和老生代的内存配置和占比等。只有深入理解JVM,才能针对不同情况进行有效的调优,以满足程序高

C++面经之多态|多态的原理|虚函数

文章目录目录一、多态的概念1.概念二、多态的定义及实现1.多态的构成条件2.虚函数3.虚函数的重写虚函数重写的两个例外:4.c++11中的override和final5.重载、覆盖(重写)、隐藏(重定义)对比三、抽象类1.概念2.接口继承和实现继承四、多态的原理1.虚函数表2.多态的原理3.动态绑定与静态绑定五、单继承

热文推荐