
[Artificial Intelligence] Optimizing Neural Networks with Genetic Algorithms (GA/NSGA): GA-BP

Contents

Using GA to find the optimal learning rate and number of hidden-layer neurons    GA-NET v1.0.py

Optimized for larger inputs such as images by adding mini-batch loading via a DataLoader    GA-NET v2.0.py

GA is also applied inside the network itself to speed up training    GA-NET v3.0.py

All of the above compute a regression loss; to compute a classification loss instead, only the code changes listed at the end of this article are needed.



After going through some papers and code online, I wrote three versions of GA-BP optimization code, from simple to complex. The code in this article is written in Python 3 with PyTorch.

Note: the code below is written so that it is easy to plug in your own data. When adapting it, the main things to change are the network structure in Net, load_data in Train (point it at your own file), and the choice of loss function. geatpy is a genetic-algorithm library written by well-known Chinese developers; this article assumes you already know how to use it. The only difference between GA and NSGA here is which geatpy template is used.
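
As a concrete illustration of that last point (a minimal sketch assuming geatpy's standard template API; My_nsga is the Problem class defined in the code below), switching between GA and NSGA only means picking a different algorithm template:

problem = My_nsga(epoch=100)
# single-objective GA template, as used throughout this article
algorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=10), MAXGEN=10, logTras=0)
# for a multi-objective Problem (M > 1), swap in the NSGA-II template instead:
# algorithm = ea.moea_NSGA2_templet(problem, ea.Population(Encoding='RI', NIND=10), MAXGEN=10, logTras=0)
res = ea.optimize(algorithm, verbose=False, drawing=0, outputMsg=True, saveFlag=False, dirName='result')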

The code is commented throughout, so try reading it. GPU support has not yet been tested or optimized.

A data file is available for testing. Format: the last column is the label; every other column is a feature.
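
If you only want to smoke-test the code without that file, a hypothetical stand-in with the same layout (7 feature columns plus a final label column, matching input_dimension = 7) can be generated like this; the values are synthetic, just for a quick run:

import numpy as np

# hypothetical synthetic stand-in for data.csv: 7 feature columns,
# last column is a noisy linear label
rng = np.random.default_rng(0)
X = rng.random((1000, 7))
y = X.sum(axis=1, keepdims=True) + rng.normal(0, 0.05, size=(1000, 1))
np.savetxt("data.csv", np.hstack([X, y]), delimiter=",")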

Comparing the three versions on the test file above, with R^2 >= 0.96 as the target: on my aging machine, v1.0 took about 1 min, v2.0 about 45 s, and v3.0 about 20 s. I have not timed other examples, and v3.0 is not guaranteed to work on every problem (it has known flaws), so pick whichever version suits your problem.

Using GA to find the optimal learning rate and number of hidden-layer neurons    GA-NET v1.0.py

import torch.nn as nn
import torch
import geatpy as ea
import numpy as np
import os
from sklearn.model_selection import train_test_split

input_dimension = 7
output_dimension = 1


# Custom network
class Net(nn.Module):
    def __init__(self, neurons_num):
        super(Net, self).__init__()
        self.hidden0 = torch.nn.Linear(input_dimension, neurons_num)
        self.hidden1 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden2 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden3 = torch.nn.Linear(neurons_num, output_dimension)

    def forward(self, x):
        x = torch.relu(self.hidden0(x))
        x = torch.relu(self.hidden1(x))
        x = torch.relu(self.hidden2(x))
        x = self.hidden3(x)
        return x


# R^2 function
def r2(y_test, y):
    return 1 - ((y_test - y) ** 2).sum() / ((y.mean() - y) ** 2).sum()


# Neural network training
class Train:
    train_x, train_y, test_x, test_y, model, lr, neurons_num, x, y, optimizer = None, None, None, None, None, None, None, None, None, None

    def __init__(self):
        self.use_gpu = torch.cuda.is_available()
        # choose an appropriate loss function
        # self.loss_fn = torch.nn.CrossEntropyLoss()
        self.loss_fn = torch.nn.MSELoss()
        self.load_data()

    # custom data loading
    def load_data(self):
        with open('data.csv') as f:
            df = np.loadtxt(f, delimiter=",", skiprows=0)
            self.x = df[:, :-1]
            self.y = df[:, -1:]

    # rebuild the train/test tensors and reset the model and optimizer
    def reload(self, learning_rate, neurons_num):
        train_x, test_x, train_y, test_y = train_test_split(self.x, self.y, test_size=0.3, random_state=42)
        self.train_x = torch.from_numpy(train_x).float()
        self.train_y = torch.from_numpy(train_y).float()
        self.test_x = torch.from_numpy(test_x).float()
        self.test_y = torch.from_numpy(test_y).float()
        self.model = Net(neurons_num)
        if self.use_gpu:
            self.train_x, self.train_y, self.test_x, self.test_y = self.train_x.cuda(), self.train_y.cuda(), self.test_x.cuda(), self.test_y.cuda()
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()
        self.lr = learning_rate
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    # train the network, return the R^2 on the test set
    def train(self, n=10):
        for epoch in range(n):
            model_output = self.model(self.train_x)
            loss = self.loss_fn(model_output, self.train_y)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        model_output = self.model(self.test_x)
        return float(r2(model_output.data, self.test_y))


# Custom GA problem that varies the learning rate and the number of neurons
class My_nsga(ea.Problem):
    def __init__(self, epoch):
        if "result" not in os.listdir():
            os.makedirs("./result")
        name = 'GA-NET'
        M = 1                 # number of objectives
        maxormins = [-1] * M  # -1 means maximize
        Dim = 2               # decision variables: encoded learning rate, neurons_num
        varTypes = [1] * Dim  # 1 means integer variables
        lb = [10, 10]         # lower bounds (the lr code is divided by 100000 later)
        ub = [5000, 100]      # upper bounds
        lbin = [1] * Dim      # lower bounds are inclusive
        ubin = [1] * Dim      # upper bounds are inclusive
        self.epoch = epoch
        self.train = Train()

        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

    # the objective value is the network's test R^2
    def evalVars(self, Vars):
        ans = np.zeros(len(Vars), dtype=float).reshape(len(Vars), 1)
        for i in range(len(Vars)):
            self.train.reload(Vars[i][0] / 100000, Vars[i][1])
            # the argument is the number of training epochs per network
            data = self.train.train(self.epoch)
            print("learning rate = {}, neurons num = {}, R^2 = {}".format(Vars[i][0] / 100000, Vars[i][1], data))
            torch.save(self.train.model, "./result/lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
            # stop once the target accuracy is reached
            if data >= 1:
                torch.save(self.train.model, "lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
                exit("Found!")
            ans[i][0] = data
        return ans


# Run the GA
class Run_nsga:
    def __init__(self, epoch=10, ndind=10, maxgen=10):
        problem = My_nsga(epoch)
        myAlgorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=ndind), MAXGEN=maxgen, logTras=0)
        myAlgorithm.drawing = 0
        res = ea.optimize(myAlgorithm, seed=1, verbose=False, drawing=0, outputMsg=True, drawLog=False, saveFlag=False, dirName='result')
        print(res)
        print(res['Vars'][0])


if __name__ == "__main__":
    # arguments: epochs per network, population size, number of GA generations
    Run_nsga(10000, 10, 30)
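
Every evaluated network is saved under ./result as a fully pickled module, so a checkpoint can be reloaded and used directly. A minimal sketch (the file name below is hypothetical; the Net class must be importable when unpickling):

import torch

model = torch.load("./result/lr=0.001num=50epoch=10000r2=0.96.pt")  # hypothetical file name
model.eval()
with torch.no_grad():
    pred = model(torch.rand(1, 7))  # one sample with input_dimension = 7 features
print(pred)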

Optimized for larger inputs such as images by adding mini-batch loading via a DataLoader    GA-NET v2.0.py

import torch.nn as nn
import torch
import geatpy as ea
import numpy as np
import os
from sklearn.model_selection import train_test_split

input_dimension = 7
output_dimension = 1
batch_size = 500


# Custom network
class Net(nn.Module):
    def __init__(self, neurons_num):
        super(Net, self).__init__()
        self.hidden0 = torch.nn.Linear(input_dimension, neurons_num)
        self.hidden1 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden2 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden3 = torch.nn.Linear(neurons_num, output_dimension)

    def forward(self, x):
        x = torch.relu(self.hidden0(x))
        x = torch.relu(self.hidden1(x))
        x = torch.relu(self.hidden2(x))
        x = self.hidden3(x)
        return x


# R^2 function
def r2(y_test, y):
    return 1 - ((y_test - y) ** 2).sum() / ((y.mean() - y) ** 2).sum()


# Dataset class so you can plug in your own data
class Data(torch.utils.data.Dataset):
    def __init__(self, data, label):
        # optional min-max normalization
        # data = (data - data.min(axis=0)) / (data.max(axis=0) - data.min(axis=0))
        # label = (label - label.min(axis=0)) / (label.max(axis=0) - label.min(axis=0))
        self.x = data
        self.y = label
        self.len = len(self.y)

    def __len__(self):
        return self.len

    def __getitem__(self, item):
        return self.x[item], self.y[item]


# Neural network training
class Train:
    trainsetloader, test_x, test_y, model, lr, neurons_num, optimizer, x, y = None, None, None, None, None, None, None, None, None

    def __init__(self):
        self.use_gpu = torch.cuda.is_available()
        # choose an appropriate loss function
        # self.loss_fn = torch.nn.CrossEntropyLoss()
        self.loss_fn = torch.nn.MSELoss()
        self.load_data()

    # custom data loading
    def load_data(self):
        with open('data.csv') as f:
            df = np.loadtxt(f, delimiter=",", skiprows=0)
            self.x = df[:, :-1]
            self.y = df[:, -1:]
        # build the train and test sets
        self.re_data_split()

    # rebuild the train/test split; called from load_data and optionally from reload
    def re_data_split(self):
        train_x, test_x, train_y, test_y = train_test_split(self.x, self.y, test_size=0.3, random_state=42)
        train_x = torch.from_numpy(train_x).float()
        train_y = torch.from_numpy(train_y).float()
        # test data
        self.test_x = torch.from_numpy(test_x).float()
        self.test_y = torch.from_numpy(test_y).float()
        trainset = Data(train_x, train_y)
        # wrap the training set in a DataLoader for mini-batch training
        self.trainsetloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False)

    # reload with a new learning rate and neuron count
    def reload(self, learning_rate, neurons_num):
        # optionally re-split train and test
        # self.re_data_split()
        self.model = Net(neurons_num)
        if self.use_gpu:
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()
        self.lr = learning_rate
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    # train the network, return the R^2 on the test set
    def train(self, n=10) -> float:
        for epoch in range(n):
            for i, (batch_x, batch_y) in enumerate(self.trainsetloader):
                model_output = self.model(batch_x)
                loss = self.loss_fn(model_output, batch_y)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            model_output = self.model(self.test_x)
            print(float(r2(model_output.data, self.test_y)))
        model_output = self.model(self.test_x)
        return float(r2(model_output.data, self.test_y))


# Custom GA problem that varies the learning rate and the number of neurons
class My_nsga(ea.Problem):
    def __init__(self, epoch):
        if "result" not in os.listdir():
            os.makedirs("./result")
        name = 'GA-NET'
        M = 1                 # number of objectives
        maxormins = [-1] * M  # -1 means maximize
        Dim = 2               # decision variables: encoded learning rate, neurons_num
        varTypes = [1] * Dim  # 1 means integer variables
        lb = [10, 10]         # lower bounds (the lr code is divided by 100000 later)
        ub = [500, 100]       # upper bounds
        lbin = [1] * Dim      # lower bounds are inclusive
        ubin = [1] * Dim      # upper bounds are inclusive
        self.count = 1
        self.epoch = epoch
        self.train = Train()

        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

    # the objective value is the network's test R^2
    def evalVars(self, Vars):
        ans = np.zeros(len(Vars), dtype=float).reshape(len(Vars), 1)
        for i in range(len(Vars)):
            self.train.reload(Vars[i][0] / 100000, Vars[i][1])
            # the argument is the number of training epochs per network
            data = self.train.train(self.epoch)
            print("learning rate = {}, neurons num = {}, R^2 = {}".format(Vars[i][0] / 100000, Vars[i][1], data))
            torch.save(self.train.model, "./result/lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
            # stop once the target accuracy is reached
            if data >= 1:
                torch.save(self.train.model, "lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
                exit("Found!")
            ans[i][0] = data
        return ans


# Run the GA
class Run_nsga:
    def __init__(self, epoch=10, ndind=10, maxgen=10):
        problem = My_nsga(epoch)
        myAlgorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=ndind), MAXGEN=maxgen, logTras=0)
        myAlgorithm.drawing = 0
        res = ea.optimize(myAlgorithm, seed=1, verbose=False, drawing=0, outputMsg=True, drawLog=False, saveFlag=False, dirName='result')
        print(res)
        print(res['Vars'][0])


if __name__ == "__main__":
    # arguments: epochs per network, population size, number of GA generations
    Run_nsga(10000, 10, 30)

GA is also applied inside the network itself, evolving the weights, to speed up training    GA-NET v3.0.py

Original reference code: Vertira's CSDN blog post 遗传算法(GA) - 优化神经网络(CNN) - pytorch (the link appears in the code comments below).

import random
import torch.nn as nn
import torch
import geatpy as ea
import numpy as np
import os
import copy
from sklearn.model_selection import train_test_split
from torch.distributions import Categorical

input_dimension = 7
output_dimension = 1
# for small datasets reduce this accordingly; ideally data_size / batch_size should equal the NetTrainGA population size
batch_size = 100


# Custom network
class Net(torch.nn.Module):
    def __init__(self, neurons_num, lr):
        super(Net, self).__init__()
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(input_dimension, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, output_dimension)
        )
        self.optimizer = torch.optim.Adam(self.parameters(), lr=lr)

    def forward(self, x):
        return self.layers(x)

    def set_layer(self, layers):
        self.layers = layers


# R^2 function
def r2(y_test, y):
    return 1 - ((y_test - y) ** 2).sum() / ((y.mean() - y) ** 2).sum()


# Dataset class so you can plug in your own data
class Data(torch.utils.data.Dataset):
    def __init__(self, data, label):
        self.x = data
        self.y = label
        self.len = len(self.y)

    def __len__(self):
        return self.len

    def __getitem__(self, item):
        return self.x[item], self.y[item]


# Neural network training with GA-evolved weights
class NetTrainGA:
    def __init__(self, _pop_size=10, _r_mutation=0.1, _p_mutation=0.1, _elite_num=6, stddev=0.1):
        self.test_x, self.test_y, self.trainSetLoader, self.x, self.y = None, None, None, None, None  # data storage
        self.pop_size = _pop_size  # population size
        self.r_mutation = _r_mutation  # per-layer mutation rate within a mutating individual
        self.p_mutation = _p_mutation  # probability that an individual mutates
        self.elite_num = _elite_num  # number of elites kept each generation
        self.chroms = []  # stores all models (the chromosomes)
        self.stddev = stddev  # maximum step size for weight perturbations
        self.criterion = nn.MSELoss()  # loss function
        self.model = None  # best model found so far
        self.use_gpu = torch.cuda.is_available()  # whether CUDA acceleration is available
        self.load_data()  # load the data
        self.lr = 0.001  # learning rate

    # custom data loading
    def load_data(self):
        with open('data.csv') as f:
            df = np.loadtxt(f, delimiter=",", skiprows=0)
            self.x = df[:, :-1]
            self.y = df[:, -1:]
        # build the train and test sets
        self.re_data_split()

    # rebuild the train/test split; called from load_data and optionally from reload
    def re_data_split(self):
        train_x, test_x, train_y, test_y = train_test_split(self.x, self.y, test_size=0.3, random_state=42)
        train_x = torch.from_numpy(train_x).float()
        train_y = torch.from_numpy(train_y).float()
        # test data
        self.test_x = torch.from_numpy(test_x).float()
        self.test_y = torch.from_numpy(test_y).float()
        trainSet = Data(train_x, train_y)
        # wrap the training set in a DataLoader for mini-batch training
        self.trainSetLoader = torch.utils.data.DataLoader(trainSet, batch_size=batch_size, shuffle=False)

    def reload(self, learning_rate, neurons_num):
        # optionally re-split train and test
        # self.re_data_split()
        self.lr = learning_rate
        # reset the population; without this, models from a previous reload (with a
        # different neurons_num) would linger at the front of self.chroms
        self.chroms = []
        for i in range(self.pop_size):
            net = Net(neurons_num, learning_rate)
            if self.use_gpu:
                net = net.cuda()
            self.chroms.append(net)

    # Train the population and return the best R^2
    """
    Improved from the code in this blog post:
    https://blog.csdn.net/Vertira/article/details/122561056
    """
    def train(self, n):
        for epoch in range(n):
            result = [{'pop': i, 'train_acc': float("-inf")} for i in range(self.pop_size)]
            # round-robin: each batch trains a different member of the population
            for step, (batch_x, batch_y) in enumerate(self.trainSetLoader):
                self.netTrain(batch_x, batch_y, (step + epoch) % self.pop_size)
            # compute each member's fitness (R^2, evaluated on the test set)
            for i in range(self.pop_size):
                output = self.chroms[i](self.test_x)
                result[i]["train_acc"] = float(r2(output.data, self.test_y))
            result = sorted(result, key=lambda x: x['train_acc'], reverse=True)
            # self.model is the current best individual and can be used directly for prediction
            self.model = self.chroms[result[0]['pop']]
            self.selection(result)
        model_output = self.model(self.test_x)
        return float(r2(model_output.data, self.test_y))

    def netTrain(self, batch_x, batch_y, now):
        model = self.chroms[now]
        optimizer = model.optimizer
        # Number of gradient steps per batch per member. This mainly affects training
        # speed, but it often affects the gradients too: too small and the gradient
        # sometimes becomes 0 or None and training stalls (I don't fully understand
        # why), too large and training can slow down.
        for j in range(100):
            output = model(batch_x)
            optimizer.zero_grad()
            train_loss = self.criterion(output, batch_y).requires_grad_()  # requires_grad_() is a workaround; see the note at the end
            train_loss.backward()
            optimizer.step()

    # keep the elites, crossover until the population is full, then mutate
    def selection(self, result):
        elites = [e['pop'] for e in result[:self.elite_num]]
        # keep elite_num elites
        children = [copy.deepcopy(self.chroms[i]) for i in elites]
        # roulette-wheel selection of parents; softmax handles negative fitness values
        prob = torch.softmax(torch.tensor([i["train_acc"] for i in result]), dim=0)
        m = Categorical(prob)
        # randomly pick pairs of parents until the population is full
        while len(children) < self.pop_size:
            # pick two parents and cross them over with self.crossover
            pair = [result[m.sample()]['pop'], result[m.sample()]['pop']]
            children.append(self.crossover(pair))
        del self.chroms[:]
        self.chroms[:] = children
        # mutate, but never mutate the elites
        for i in range(self.elite_num, self.pop_size):
            # mutate with probability p_mutation
            if random.random() < self.p_mutation:
                mutated_child = self.mutation(i)
                del self.chroms[i]
                self.chroms.insert(i, mutated_child)

    def crossover(self, _selected_pop):
        if _selected_pop[0] == _selected_pop[1]:
            return copy.deepcopy(self.chroms[_selected_pop[0]])

        chrom1 = copy.deepcopy(self.chroms[_selected_pop[0]])
        chrom2 = copy.deepcopy(self.chroms[_selected_pop[1]])

        chrom1_layers = nn.ModuleList(chrom1.modules())
        chrom2_layers = nn.ModuleList(chrom2.modules())

        child = torch.nn.Sequential()
        for i in range(len(chrom1_layers)):
            layer1 = chrom1_layers[i]
            layer2 = chrom2_layers[i]
            # randomly swap Linear layers between the two parents
            if isinstance(layer1, nn.Linear):
                child.add_module(str(i - 2), layer1 if random.random() < 0.5 else layer2)

            elif isinstance(layer1, (torch.nn.Sequential, Net)):
                pass
            else:
                child.add_module(str(i - 2), layer1)
        chrom1.set_layer(child)
        chrom1.optimizer = torch.optim.Adam(chrom1.parameters(), lr=self.lr)
        return chrom1

    def mutation(self, _selected_pop):
        child = torch.nn.Sequential()
        chrom = copy.deepcopy(self.chroms[_selected_pop])
        chrom_layers = nn.ModuleList(chrom.modules())
        # mutate a subset of the layers
        for i, layer in enumerate(chrom_layers):
            if isinstance(layer, nn.Linear):
                # mutate this Linear layer with probability r_mutation
                if random.random() < self.r_mutation:
                    # extract the weights
                    weights = layer.weight.detach().numpy()
                    # perturb the weights with Gaussian noise
                    w = weights.astype(np.float32) + np.random.normal(0, self.stddev, weights.shape).astype(np.float32)
                    # set them back as a fresh Parameter
                    layer.weight = torch.nn.Parameter(torch.from_numpy(w))
                child.add_module(str(i - 2), layer)
            elif isinstance(layer, (torch.nn.Sequential, Net)):
                pass
            else:
                child.add_module(str(i - 2), layer)
        chrom.set_layer(child)
        chrom.optimizer = torch.optim.Adam(chrom.parameters(), lr=self.lr)
        return chrom


# Custom GA problem that varies the learning rate and the number of neurons
class My_nsga(ea.Problem):
    def __init__(self, epoch):
        if "result" not in os.listdir():
            os.makedirs("./result")
        name = 'GA-NET'
        M = 1                 # number of objectives
        maxormins = [-1] * M  # -1 means maximize
        Dim = 2               # decision variables: encoded learning rate, neurons_num
        varTypes = [1] * Dim  # 1 means integer variables
        lb = [10, 10]         # lower bounds (the lr code is divided by 10000 later)
        ub = [500, 100]       # upper bounds
        lbin = [1] * Dim      # lower bounds are inclusive
        ubin = [1] * Dim      # upper bounds are inclusive
        self.count = 1
        self.epoch = epoch
        self.train = NetTrainGA()

        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

    # the objective value is the network's test R^2
    def evalVars(self, Vars):
        ans = np.zeros(len(Vars)).reshape(len(Vars), 1)
        for i in range(len(Vars)):
            self.train.reload(Vars[i][0] / 10000, Vars[i][1])
            # the argument is the number of training epochs per network
            data = self.train.train(self.epoch)
            print("learning rate = {}, neurons num = {}, R^2 = {}".format(Vars[i][0] / 10000, Vars[i][1], round(data, 3)))
            torch.save(self.train.model, "./result/lr{}num{}epoch{}r2{}.pt".format(Vars[i][0] / 10000, Vars[i][1], self.epoch, round(data, 3)))
            # stop once the target accuracy is reached (returning 0 here would crash geatpy, so exit instead)
            if data >= 1:
                torch.save(self.train.model, "lr{}num{}epoch{}r2{}.pt".format(Vars[i][0] / 10000, Vars[i][1], self.epoch, round(data, 3)))
                exit("Found!")
            ans[i] = float(data)
        return ans


# Run the GA
class Run_nsga:
    def __init__(self, epoch=10, ndind=10, maxgen=10):
        problem = My_nsga(epoch)
        myAlgorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=ndind), MAXGEN=maxgen, logTras=0)
        myAlgorithm.drawing = 0
        res = ea.optimize(myAlgorithm, seed=1, verbose=False, drawing=0, outputMsg=True, drawLog=False, saveFlag=False, dirName='result')
        print(res)
        print(res['Vars'][0])


if __name__ == "__main__":
    # arguments: epochs per network, population size, number of GA generations
    Run_nsga(100, 10, 10)
    """
    # 也可以单独调用 NetTrainGA,设置初始参数
    netga = NetTrainGA()
    # learning rate, neurons_num
    netga.reload(0.001, 30)
    # epoch
    print(netga.train(1000))
    """

All of the above compute a regression loss. To compute a classification loss instead, only the following changes are needed.

See the referenced article for more detail.

# change the output dimension to the number of classes
output_dimension = 28
# keep y one-dimensional (class indices) for CrossEntropyLoss
self.y = df[:, -1]
# change the loss function
self.criterion = nn.CrossEntropyLoss()
# convert train_y from float to long (class indices)
train_y = torch.from_numpy(train_y).long()
# change the train_acc computation to classification accuracy
result[i]["train_acc"] = float((output.argmax(dim=1) == self.test_y).sum()) / len(self.test_y)
# change the value train() returns
return float((model_output.argmax(dim=1) == self.test_y).sum() / len(self.test_y))
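
As a quick sanity check on those shapes (a standalone sketch, not part of the project files): CrossEntropyLoss expects float logits of shape (N, C) and long class indices of shape (N), which is exactly why y stays one-dimensional and train_y becomes long:

import torch

loss_fn = torch.nn.CrossEntropyLoss()
logits = torch.randn(4, 28)            # (N, C), matching output_dimension = 28
targets = torch.tensor([0, 3, 27, 5])  # (N,) long class indices, not one-hot
print(loss_fn(logits, targets))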

v3.0 is flawed: the optimizer is where the bug lies in the whole GA-evolved-weights scheme. This article was last updated on 2022-07-13; if you have a better fix, you are welcome to discuss it with me.
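
For what it is worth, one direction worth trying (an untested assumption on my part, not a verified fix): perturb the weights in place under torch.no_grad() instead of the numpy round-trip in mutation(). The original Parameter objects then stay alive, so an optimizer that references them remains valid, and the .requires_grad_() workaround in netTrain may become unnecessary:

import torch

# hypothetical in-place alternative to the weight mutation above; untested against this codebase
def mutate_linear_(layer: torch.nn.Linear, stddev: float) -> None:
    with torch.no_grad():
        layer.weight.add_(torch.randn_like(layer.weight) * stddev)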

Personal blog, visits welcome: https://www.pancake2021.work/?p=1511
