Drawing dynamic histograms in Python with matplotlib

1. Dynamic plotting like this can be used to observe the performance of a deep-learning model as it trains.

Example code 1: dynamic histogram
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
y1 = []
for i in range(50):
    y1.append(i)  # append the new value on each iteration
    ax.cla()      # clear the axes so the chart is redrawn from scratch
    ax.bar(range(len(y1)), y1, width=0.3, label='test')
    ax.legend()
    plt.pause(0.1)
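
As a side note, matplotlib also provides a dedicated animation API that achieves the same effect without an explicit loop. Below is a minimal sketch of the growing bar chart from example code 1 using matplotlib.animation.FuncAnimation; the update function name is just illustrative:

import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
y1 = []

def update(i):
    y1.append(i)  # one new value per frame
    ax.cla()      # redraw from scratch, as in the loop version above
    ax.bar(range(len(y1)), y1, width=0.3, label='test')
    ax.legend()

# 50 frames, 100 ms apart, matching the loop above; keep a reference to the
# animation object so it is not garbage-collected before it runs
ani = FuncAnimation(fig, update, frames=50, interval=100, repeat=False)
plt.show()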


Example code 2: two dynamic curves overlaid on one figure

# Test program
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(0, 2 * np.pi, 0.1)
y1 = np.sin(x)
y2 = np.cos(x)

plt.figure(1)
plt.plot(x, y1)
plt.pause(2)  # draw the sin curve on figure(1) and keep it on screen for 2 s

plt.figure(1)
plt.plot(x, y2)
plt.pause(2)  # overlay the cos curve on the same figure for another 2 s
plt.close()   # plt.pause() does not close the window; close it explicitly
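
The same overlay can also be driven through matplotlib's interactive mode; here is a minimal sketch (exact window behavior depends on the GUI backend, so treat this as something to verify locally):

import numpy as np
import matplotlib.pyplot as plt

plt.ion()  # interactive mode: plot calls render without blocking
x = np.arange(0, 2 * np.pi, 0.1)
plt.plot(x, np.sin(x))
plt.pause(2)
plt.plot(x, np.cos(x))  # overlaid on the same axes
plt.pause(2)
plt.ioff()
plt.close()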

Example code 3: grouped histogram comparison

import numpy as np
import matplotlib.pyplot as plt

men_means = (20, 35, 30, 35, 27)
women_means = (25, 32, 34, 20, 25)

ind = np.arange(len(men_means))  # the x locations for the groups
width = 0.35  # the width of the bars

fig, ax = plt.subplots()
rects1 = ax.bar(ind - width / 2, men_means, width, color='SkyBlue', label='Men')
rects2 = ax.bar(ind + width / 2, women_means, width, color='IndianRed', label='Women')

# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
plt.xticks(ind, ('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend()

plt.show()

Example code 4: combining example code 1 and example code 3
This dynamic grouped histogram can be used to compare a deep-learning model's per-class prediction counts against the label counts as training progresses.

import numpy as np
import random
import matplotlib.pyplot as plt

EPOCH = 100
# x locations of the class labels
ind = np.arange(10)
# bar width
width = 0.5

# build a list of random integers
def random_int_list(start, stop, length):
    # start: lower bound, stop: upper bound, length: list length
    start, stop = (int(start), int(stop)) if start <= stop else (int(stop), int(start))
    length = int(abs(length)) if length else 0
    random_list = []
    for i in range(length):
        random_list.append(random.randint(start, stop))
    return random_list

# simulated label counts; created outside the loop so they are initialized only once
label_list = random_int_list(10, 100, 10)
# create the figure only once, outside the loop; creating it inside would open a new window every iteration
fig, ax = plt.subplots()
for epoch in range(EPOCH):
    ax.cla()  # clear all previous bars
    # simulated model predictions for this iteration
    predicted_list = random_int_list(10, 100, 10)
    rects1 = ax.bar(ind - width / 2, label_list, width, color='IndianRed', label='label_list')
    rects2 = ax.bar(ind + width / 2, predicted_list, width, color='SkyBlue', label='predicted_list')
    ax.legend()
    plt.pause(0.1)
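
Since NumPy is already imported, random_int_list could also be replaced by a single vectorized call; a sketch of the equivalent (note that np.random.randint's upper bound is exclusive, unlike random.randint's):

import numpy as np

# equivalent of random_int_list(10, 100, 10): 10 integers drawn from [10, 100]
label_list = np.random.randint(10, 101, size=10)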

Example code 5: training code for a fully connected model

import torch
import numpy as np
import torch.nn as nn
from tqdm import tqdm
import matplotlib.pyplot as plt

# hyperparameters
EPOCH = 1000  # number of training epochs
LR = 0.01  # learning rate
class_number = 10  # number of classes
stop_threshold = 0.99  # stop training once accuracy exceeds this threshold
sample_sum = 1000  # number of samples
model_input = 1000  # number of input features per sample

# Label encoding: average each sample's 1000 random feature values, divide the range of
# these means into 10 equal intervals, and map the intervals to class labels 0 through 9.
def code_label(data):
    data_label = np.sum(data, 1)  # sum each row (collapse the feature axis)
    data_label = np.around(data_label / model_input, decimals=2)  # per-sample mean, rounded to 2 decimals
    min_label = np.around(min(data_label), decimals=2)
    max_label = np.around(max(data_label), decimals=2)
    step = np.around((max_label - min_label) / class_number, decimals=2)
    section_lower = min_label  # lower bound of the current interval
    data_label_code = np.zeros((len(data_label), class_number))

    for i in range(class_number):
        section_upper = section_lower + step
        for index in range(len(data_label)):
            if (data_label[index] >= section_lower) & (data_label[index] < section_upper):
                data_label_code[index][i] = 1
        section_lower = section_upper

    # rounding can leave the largest means outside the last half-open interval; assign them to the top class
    data_label_code[np.sum(data_label_code, 1) == 0, class_number - 1] = 1
    return data_label_code

# a fully connected layer with selectable weight initialization
class Linear(nn.Module):
    def __init__(self, in_features, out_features):
        super(Linear, self).__init__()
        w = torch.empty(in_features, out_features)
        # random normal initialization
        #self.w = nn.Parameter(torch.randn(in_features, out_features))
        # Xavier initialization (uncomment `self.w = nn.Parameter(w)` below to actually use it)
        #nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
        nn.init.xavier_normal_(w, gain=1)
        #self.w = nn.Parameter(w)
        # Kaiming initialization
        #nn.init.kaiming_normal_(w, a=0, mode='fan_out', nonlinearity='relu')
        #self.w = nn.Parameter(w)
        #self.w = nn.Parameter(torch.from_numpy(np.random.randn(in_features, out_features) / np.sqrt(out_features * 0.5)).float())
        # all-ones initialization (note: this is the active choice; it overrides the Xavier-initialized w above)
        self.w = nn.Parameter(torch.ones(in_features, out_features))
        self.b = nn.Parameter(torch.zeros(out_features))

    def forward(self, x):
        x = x.mm(self.w)
        return x + self.b.expand_as(x)

# define the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.layer1 = Linear(1000, 800)
        self.layer2 = Linear(800, 500)
        self.layer3 = Linear(500, 300)
        self.layer4 = Linear(300, 200)
        self.layer5 = Linear(200, 100)
        self.layer6 = Linear(100, 80)
        self.layer7 = Linear(80, 10)


    def forward(self,x):
        x = self.layer1(x)
        x = torch.relu(x)
        x = self.layer2(x)
        x = torch.relu(x)
        x = self.layer3(x)
        x = torch.relu(x)
        x = self.layer4(x)
        x = torch.relu(x)
        x = self.layer5(x)
        x = torch.relu(x)
        x = self.layer6(x)
        x = torch.relu(x)
        x = self.layer7(x)
        x = torch.relu(x)  # note: CrossEntropyLoss expects raw logits, so this final ReLU (and a softmax) is not strictly needed
        #x = F.softmax(x, dim=1)
        return x

# instantiate the model
net = Net()
# inspect the initial parameters
for parameters in net.parameters():
    print(parameters)
for name, parameters in net.named_parameters():
    print(name, ':', parameters.size())

# loss function
#loss_func = torch.nn.MSELoss()
loss_func = torch.nn.CrossEntropyLoss()
losses_his = []  # loss recorded at each epoch
# optimizer
opt = torch.optim.SGD(net.parameters(), lr=LR)
corr = []  # accuracy recorded at each epoch

# generate data
init_seed = 1
np.random.seed(init_seed)
data = np.random.randn(sample_sum, 1000)
# one-hot encode the labels of the generated data
data_label_code = code_label(data)

b_x = torch.from_numpy(data).float()  # model input as float32 (Variable wrapping is no longer needed in modern PyTorch)
b_y = torch.from_numpy(data_label_code).float()  # one-hot labels as float32
_, labels = torch.max(b_y, dim=1)  # class indices, the format CrossEntropyLoss expects

fig, ax = plt.subplots()
for epoch in tqdm(range(EPOCH)):
    output = net(b_x)
    output = output.view(len(b_y), class_number)
    _, predicted = torch.max(output, dim=1)
    loss = loss_func(output, labels)
    losses_his.append(loss.item())
    opt.zero_grad()  # clear gradients for the next step
    loss.backward()  # backpropagation: compute gradients
    opt.step()
    # compute accuracy
    model_corr = (predicted == labels).sum().item()
    corr.append(model_corr / len(b_y))
    if epoch % 100 == 99:
        print('%5d epoch loss: %.3f' % (epoch, loss.item()))
        print('%5d epoch correct_rate: %.3f' % (epoch, model_corr / len(b_y)))

        # per-class sample counts for labels and predictions; np.bincount guarantees
        # a fixed length of 10 even when some classes never occur, so the bar
        # positions and heights always match
        label_list = np.bincount(labels.numpy(), minlength=class_number)
        predicted_list = np.bincount(predicted.numpy(), minlength=class_number)

        ind = np.arange(10)
        width = 0.5

        ax.cla()
        rects1 = ax.bar(ind - width / 2, label_list, width, color='IndianRed', label='label_list')
        rects2 = ax.bar(ind + width / 2, predicted_list, width, color='SkyBlue', label='predicted_list')
        ax.legend()
        plt.pause(0.1)
        model_corr_rate = model_corr/sample_sum
        if model_corr_rate > stop_threshold:
            break



plt.show()
plt.plot(losses_his, label='loss')
plt.legend(loc='best')
plt.xlabel('steps')
plt.ylabel('loss')
# y-axis range
plt.ylim((0, max(losses_his)))
plt.show()
plt.show()

plt.plot(corr, label='corr')
plt.legend(loc='best')
plt.xlabel('steps')
plt.ylabel('corr')
# y-axis range
plt.ylim((0, 1))
plt.show()
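
One caveat worth a sketch: the script above seeds NumPy but not PyTorch, so the random weight-initialization variants would differ between runs. A minimal addition for reproducibility, reusing the same init_seed (torch.manual_seed seeds PyTorch's CPU random number generator):

import torch
import numpy as np

init_seed = 1
np.random.seed(init_seed)     # fixes the generated data
torch.manual_seed(init_seed)  # fixes PyTorch weight initialization and other torch RNG draws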

Reference links:
https://blog.csdn.net/xyisv/article/details/80651334
https://blog.csdn.net/beyond9305/article/details/82958683