Spiking Neural Networks: the Tempotron Learning Algorithm
1. The Tempotron Algorithm
Today I read the paper "The tempotron: a neuron that learns spike timing-based decisions" and then looked for a demo of the tempotron algorithm online. The code is adapted from the post 脉冲神经网络之Tempotron(二), with plotting of the training accuracy added. The code and the run results are shown below.
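Before the full listing, a quick illustration of the model: the tempotron sums, over every input spike, a postsynaptic-potential kernel K(t - ti) = V0*(exp(-(t - ti)/tau_m) - exp(-(t - ti)/tau_s)) weighted by the corresponding synaptic weight, and classifies a pattern as positive if the resulting membrane potential ever crosses the threshold. The snippet below is a minimal sketch of that kernel (not part of the script), using the same constants the script uses (tau_m = 20 ms, tau_s = tau_m/4, V_thr = 1):

% Minimal sketch of the tempotron PSP kernel (same constants as the full script)
tau_m = 20e-3; tau_s = tau_m/4; dt = 1e-3;
t = 0:dt:5*tau_m;                          % time elapsed since a presynaptic spike
V0 = 1/max(exp(-t/tau_m) - exp(-t/tau_s)); % normalize so the kernel peak equals 1
K = V0*(exp(-t/tau_m) - exp(-t/tau_s));    % K(t - ti)
figure; plot(t*1e3, K); xlabel('t - t_i (ms)'); ylabel('K');
% V(t) = sum_i w_i * sum_{ti} K(t - ti) + V_rest; the neuron fires (positive
% decision) the first time V(t) crosses V_thr = 1.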
2. Code (MATLAB)
- Baidu Cloud link to the code (extraction code: 4jox)
function TempotronClassify()
% Tempotron: a neuron that learns spike timing-based decisions
% Robert Gutig & Haim Sompolinsky, Nature Neuroscience, 2006
clear;
clc;
NumImages = 26;
TrainPtns = zeros(32,NumImages); % preallocate: image2ptn returns 32 spike times per image
for i = 1:NumImages
ImageName = strcat('Icon16X16\Letter-',char('A'+i-1),'-black-icon');% build the file name of each letter icon in the Icon16X16 folder; strcat concatenates strings horizontally
ImageMatrix = imread(ImageName,'bmp');% read the icon as a binary matrix (black = 0, white = 1)
ImageMatrix = ~ImageMatrix; % invert: white = 0, black = 1
TrainPtns(:,i) = image2ptn(ImageMatrix); % image2ptn encodes the stimulus, i.e. converts the pixel data into a spike train
end
TrainPtns = TrainPtns * 1e-3; % convert spike times from ms to seconds
nAfferents = size(TrainPtns,1); % number of input afferents (32)
nPtns = NumImages; % 26 patterns
nOutputs = 5; % 5 output neurons: a 5-bit binary code identifies each letter
loadData=0;% whether to load previously saved initial weights
V_thr = 1; V_rest = 0;
T = 256e-3; % pattern duration: a 256 ms time window
dt = 1e-3; % simulation time step of 1 ms
tau_m = 20e-3; % membrane time constant
tau_s = tau_m/4; % synaptic time constant (tau_m/tau_s = 4)
% kernel: K(t-ti) = V0*(exp(-(t-ti)/tau_m) - exp(-(t-ti)/tau_s))
% (0:dt:3*tau_m) generates values from 0 to 0.06 s in steps of 0.001 s
aa = exp(-(0:dt:3*tau_m)/tau_m)-exp(-(0:dt:3*tau_m)/tau_s); % unnormalized kernel exp(-(t-ti)/tau_m) - exp(-(t-ti)/tau_s)
V0 = 1/max(aa); % normalization factor so that the kernel peak equals 1
lmd = 2e-2; % learning rate %1e-2/V0; % optimal performance lmd=3e-3*T/(tau_m*nAfferents*V0) 1e-4/V0;
maxEpoch = 200; % 200 training epochs
mu = 0.99; % momentum factor
% each pattern consists of one spike per afferent
if loadData ==0 % initialize the network with fresh random weights and save them
weights = 1e-2*randn(nAfferents,nOutputs); % 1e-3*randn(nAfferents,1);
save('weights0','weights');
else
load('weights0','weights');
end
%Class = logical(eye(nOutputs)); % desired class label for each pattern
%Class = false(1,26); Class(26)=true;
Class = de2bi(1:26,'left-msb'); % desired labels: 26 classes, each encoded as a 5-bit binary number
Class=Class';
correctRate=zeros(1,maxEpoch);
dw_Past=zeros(nAfferents,nPtns,nOutputs); % previous weight updates, used as momentum to accelerate learning
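% Learning rule (implemented in the loops below): each pattern/neuron pair is
% simulated over the full 256 ms window; whenever the binary decision is wrong,
% the weights of all afferents that spiked before tmax (the time of the
% voltage maximum) are changed by
%   Dw = +lmd * K(tmax - ti)        (LTP: the neuron should have fired but did not)
%   Dw = -1.1 * lmd * K(tmax - ti)  (LTD: the neuron fired but should not have)
% plus a momentum term mu times the previous update for that pattern/neuron.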
for epoch=1:maxEpoch
Class_Tr = false(nOutputs,nPtns); % actual outputs of training
for pp=1:nPtns % loop over the 26 training patterns
% Class_Tr = false(nOutputs,1); % actual outputs of training
for neuron=1:nOutputs % each of the 5 output neurons is simulated independently
Vmax=0; tmax=0;
fired=false;
Vm1=zeros(1,256); indx1= 1; % trace of this output neuron's membrane potential over the time window
for t=dt:dt:T % simulate every time step of this pattern for this output neuron
Vm = 0;
if fired==false
Tsyn=find(TrainPtns(:,pp)<=t+0.1*dt); % no cut window
else
Tsyn=find(TrainPtns(:,pp)<=t_fire+0.1*dt); % shut down inputs
end
if ~isempty(Tsyn)
A1=TrainPtns(:,pp);
A2=A1(Tsyn);
K =V0*(exp(-(t-A2)/tau_m)-exp(-(t-A2)/tau_s)); % the kernel value for each fired afferent
A1=weights(:,neuron);
firedWeights=A1(Tsyn);
Vm = Vm + firedWeights'*K; % weighted sum of the fired afferents' kernels: this neuron's membrane potential at time t
end
Vm = Vm + V_rest;
if Vm>=V_thr && fired==false % fire
fired=true;
t_fire=t;
Class_Tr(neuron,pp)=true;
end
if Vm>Vmax
Vmax=Vm; tmax=t;
end
%if pp==1
Vm1(indx1)=Vm;
indx1=indx1+1;
%end
end
%if pp==1
%figure(1); plot(dt:dt:T,Vm1);
%title(strcat('Image ',char('A'+pp-1),'; neuron: ',num2str(neuron))); drawnow;
%end
if Vmax <= 0
tmax=max(TrainPtns(:,pp));
end
if Class_Tr(neuron,pp)~=Class(neuron,pp) % the neuron's output disagrees with the desired label, so update its weights
Tsyn=find(TrainPtns(:,pp)<=tmax+0.1*dt);
if ~isempty(Tsyn)
A1=TrainPtns(:,pp);
A2=A1(Tsyn);
K =V0*(exp(-(tmax-A2)/tau_m)-exp(-(tmax-A2)/tau_s)); % the kernel value for each fired afferent
A1=weights(:,neuron);
dwPst=dw_Past(:,pp,neuron);
if fired==false % LTP: the neuron should have fired but did not, so increase the weights (Eq. 2 in the paper)
Dw=lmd*K;
else % LTD: the neuron fired when it should not have, so decrease the weights
Dw=-1.1*lmd*K;
end
A1(Tsyn) = A1(Tsyn) + Dw + mu*dwPst(Tsyn);
weights(:,neuron)=A1;
dwPst(Tsyn)=Dw;
dw_Past(:,pp,neuron) = dwPst;
end
end
end % end of one neuron computation
end % end of one image
%CC=isequal(Class,Class_Tr);
%correctRate(epoch)=sum(Class==Class_Tr)/length(Class);
C = bi2de(Class','left-msb'); % desired letter codes
CT = bi2de(Class_Tr','left-msb'); % predicted letter codes (all 5 bits must match)
CC= (C==CT);
correctRate(epoch) = sum(CC)/length(CC); % fraction of the 26 letters classified correctly
end
save('TrainedWt','weights');
figure(2); plot(1:maxEpoch,correctRate,'-b.'); % training accuracy versus epoch
end
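% Notes on running this script, as implied by the code itself:
% - the Icon16X16 folder must contain 16x16 black-and-white BMP icons named
%   'Letter-A-black-icon.bmp' through 'Letter-Z-black-icon.bmp';
% - de2bi and bi2de require the Communications Toolbox;
% - image2ptn below loads RandIndex.mat when loadR == 1; set loadR = 0 on the
%   first run to generate and save that file.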
%% convert an image to a spike train saved in a vector
function spikeTrain = image2ptn(A)
RandParts = 1; % the encoding actually returned uses random groups of 8 pixels (32 afferents)
A1 = A'; % transpose so that A1(:) lists the pixels row by row
B = [A1(:);A(:)]; % concatenate the row-wise and column-wise pixel orderings (512 bits for a 16x16 icon)
numPixels = length(B);
numInputNeurons = numPixels/8; % 64 neurons
spikeTrain = zeros(numInputNeurons,1);
% First encoding: one afferent per consecutive block of 8 pixels (64 afferents).
% Note that spikeTrain is re-initialized below, so this encoding is discarded.
for i = 1:numInputNeurons
B1 = B((1+(i-1)*8):(8+(i-1)*8)); % take 8 consecutive bits of B; e.g. for i=1, B1=[0 0 0 0 0 0 0 0]'
B1 = B1';
spikeTime = bi2de(B1,'left-msb'); % interpret the 8 bits as a decimal spike time (in ms)
if spikeTime == 0
spikeTime = 2^8; % an all-zero byte would give time 0; move its spike to the end of the 256 ms interval
end
spikeTrain(i) = spikeTime;
end
spikeTrain = zeros(32,1); % re-initialize: the returned spike train has 32 afferents
if RandParts == 1
loadR = 1; % load a previously saved pixel permutation so every image uses the same grouping; set to 0 on the first run to generate and save RandIndex.mat
AR = A(:);
if loadR == 0
R = randperm(size(A,1)*size(A,2)); % random permutation of the 256 pixel indices
save('RandIndex','R');
else
load('RandIndex','R');
end
numRandNeus = 32;
for i = 1:numRandNeus
IndexR = R((1+(i-1)*8):(8+(i-1)*8));
Bits = AR(IndexR); % pick 8 pixels from AR according to the random indices, e.g. 01000001
Bits = Bits';
spikeTime = bi2de(Bits,'left-msb'); % binary to decimal, e.g. 01000001 -> 65
if spikeTime == 0
spikeTime = 2^8; % put 0 to the end of the interval
end
spikeTrain(i) = spikeTime;
end
end
end
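To make the encoding concrete, here is a small worked example (the bit pattern is purely illustrative, not taken from one of the icons): every group of 8 pixels is read as a big-endian byte, and that value, in milliseconds, is the spike time of the corresponding afferent.

bits = [0 1 0 0 0 0 0 1];          % 8 binary pixels read as one big-endian byte
spikeTime = bi2de(bits,'left-msb') % = 65, so this afferent spikes at t = 65 ms
% An all-zero byte would give 0, which image2ptn maps to 2^8 = 256 ms,
% i.e. the very end of the pattern window.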
Run results (figure: classification accuracy over the 200 training epochs)