This commit is contained in:
ton
2023-09-10 11:28:40 +07:00
parent fb3e59a414
commit e7c0228313
5 changed files with 384 additions and 155 deletions

View File

@@ -414,17 +414,18 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
bestAccuracy = 0.0
finalAnswer = [0] |> device # store model prediction in (logit of choices, batch)
stop = 0
vt0 = 0.0 # store vt to compute learning progress
for epoch = 1:1000
stop == 3 ? break : false
println("epoch $epoch")
n = length(trainData)
println("n $n")
p = Progress(n, dt=1.0) # minimum update interval: 1 second
for (imgBatch, labels) in trainData # imgBatch (28, 28, 4) i.e. (row, col, batch)
for (imgBatch, labels) in trainData # imgBatch(28, 28, 4) i.e. (row, col, batch), labels(label, batch)
for rep in 1:10
stop == 3 ? break : false
#WORKING prepare image into input signal (10, 2, 784, 4) i.e. (row, col, timestep, batch)
# prepare image into input signal (10, 2, 784, 4) i.e. (row, col, timestep, batch)
signal = dualTrackSpikeGen(imgBatch, [0.05, 0.1, 0.2, 0.3, 0.5], noise=(true, 1, 0.1), copies=18)
if length(size(signal)) == 3
row, col, sequence = size(signal)
@@ -434,7 +435,7 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
end
# encode labels
correctAnswer = onehotbatch(labels, labelDict) # (choices, batch)
correctAnswer = onehotbatch(labels, labelDict) # (correctAnswer, batch)
# insert data into model sequentially
for timestep in 1:(sequence + thinkingPeriod) # sMNIST has 784 timestep(pixel) + thinking period = 1000 timestep
@@ -447,6 +448,7 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
if timestep == 1 # tell a model to start learning. 1-time only
model.learningStage = [1]
finalAnswer = [0] |> device
vt0 = 0.0
elseif timestep == (sequence+thinkingPeriod)
model.learningStage = [3]
else
@@ -467,9 +469,20 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
# no error calculation
elseif timestep == sequence # online learning, 1-by-1 timestep
# no error calculation
elseif timestep > sequence && timestep < sequence+thinkingPeriod # collect answer
#WORKING answer time windows, collect logit to get finalAnswer
elseif timestep > sequence && timestep < sequence+thinkingPeriod
logit_cpu = logit |> cpu
logit_cpu = logit_cpu[:,1]
finalAnswer = length(finalAnswer) == 1 ? logit : finalAnswer .+ logit # (logit, batch)
predict_cpu = logit |> cpu
finalAnswer_cpu = finalAnswer |> cpu
on_vt_cpu = model.on_vt |> cpu
on_vt_cpu = on_vt_cpu[1,1,:,1]
modelError = loss(vt0, on_vt_cpu, logit_cpu, finalAnswer_cpu, labels[1])
vt0 = on_vt_cpu # update vt0 for this timestep
error("DEBUG -> main $(Dates.now())")
modelError = (predict_cpu .- correctAnswer)
modelError = reshape(modelError, (1,1,:, size(modelError, 2)))
@@ -509,14 +522,28 @@ function train_snn(model, trainData, validateData, labelDict::Vector)
# # error("DEBUG -> main $(Dates.now())")
# end
elseif timestep == sequence+thinkingPeriod
elseif timestep == sequence+thinkingPeriod #TODO update code
logit_cpu = logit |> cpu
finalAnswer = length(finalAnswer) == 1 ? logit : finalAnswer .+ logit # (logit, batch)
predict_cpu = logit |> cpu
finalAnswer_cpu = finalAnswer |> cpu
on_vt_cpu = model.on_vt |> cpu
on_vt_cpu = on_vt_cpu[1,1,:,1]
# get vt of correct neuron, julia array is 1-based index
labelPosition = labels[1] + 1
on_vt_cpu = on_vt_cpu[labelPosition]
modelError = (predict_cpu .- correctAnswer)
modelError = loss(vt0, on_vt_cpu, logit_cpu, finalAnswer_cpu)
vt0 = on_vt_cpu
error("DEBUG -> main $(Dates.now())")
modelError = (logit_cpu .- correctAnswer)
modelError = reshape(modelError, (1,1,:, size(modelError, 2)))
modelError = sum(modelError, dims=3) |> device
outputError = (predict_cpu .- correctAnswer) |> device
outputError = (logit_cpu .- correctAnswer) |> device
lif_epsilonRec_cpu = model.lif_epsilonRec |> cpu
on_zt_cpu = model.on_zt |> cpu
@@ -847,6 +874,33 @@ function noiseGenerator(row, col, z; prob=0.5)
return noise
end
"""
    loss(vt0, vt1::AbstractArray, logit::AbstractArray,
         finalAnswer::AbstractArray, correctAnswer::Number)

Compute a progress-based model error for the output neuron that corresponds to
`correctAnswer`.

# Arguments
- `vt0`: previous-timestep membrane potential; either a scalar (the training
  loop initializes it to `0.0`) or an array, in which case the element for the
  correct neuron is used.
- `vt1`: current membrane potentials, indexed by (0-based label + 1).
- `logit`: current spike outputs; `logit[label+1] == 1` means the model already
  answers correctly.
- `finalAnswer`: accumulated logits — currently unused, kept for interface
  stability with the caller.
- `correctAnswer`: 0-based class label.

# Returns
- `0.0` when the correct neuron already fired (no weight update),
- `1.0 - vt1` when potential increased (progress),
- `0.11111111` as a special "no progress" signal when unchanged,
- `vt0 - vt1` when potential decreased (setback).

Throws an error for any other condition (only reachable with NaN inputs).
"""
function loss(vt0, vt1::AbstractArray, logit::AbstractArray,
    finalAnswer::AbstractArray, correctAnswer::Number)
    labelPosition = correctAnswer + 1 # julia array is 1-based index
    # Previous vt of the correct neuron. The caller passes a scalar 0.0 on the
    # first learning timestep and a full vt vector afterwards; reduce the
    # vector to the correct neuron's element so the comparisons below are
    # scalar-vs-scalar. (The previous `vt0::AbstractArray` signature rejected
    # the scalar case and the scalar-vs-array comparisons would error.)
    v0 = vt0 isa AbstractArray ? vt0[labelPosition] : vt0
    # get vt of correct neuron
    v1 = vt1[labelPosition]
    # get zt of correct neuron
    zt = logit[labelPosition]
    # Direct returns keep the function type-stable (no `nothing` placeholder).
    if zt == 1
        return 0.0 # already correct, no weight update
    elseif v1 > v0 # progress increase
        return 1.0 - v1
    elseif v1 == v0 # no progress
        return 0.11111111 # special signal
    elseif v1 < v0 # setback
        return v0 - v1
    else
        error("undefined condition line $(@__LINE__)")
    end
end
# function arrayMax(x)
# if sum(GeneralUtils.isNotEqual.(x, 0)) == 0 # guard against all-zeros array
# return GeneralUtils.isNotEqual.(x, 0)

View File

@@ -27,9 +27,10 @@ using .interface
""" version 0.0.9
Todo:
[0*] change model error calculation in user script, (progress based)
[1] +W 90% of most active conn
[2] -W 10% of less active conn
[3] synapse reconnect delay counter
[-] add temporal summation in addition to already used spatial summation.
CANCELLED, spatial summation every second until membrane potential reach a threshold
is in itself a temporal summation.

View File

@@ -26,7 +26,7 @@ function (kfn::kfn_1)(input::AbstractArray)
kfn.lif_firingCounter .= 0
kfn.lif_refractoryCounter .= 0
kfn.lif_zt .= 0
kfn.lif_synapticActivityCounter .= 0
kfn.lif_synapseReconnectDelayCounter .= 0
kfn.alif_vt .= 0
kfn.alif_a .= 0
@@ -36,7 +36,7 @@ function (kfn::kfn_1)(input::AbstractArray)
kfn.alif_firingCounter .= 0
kfn.alif_refractoryCounter .= 0
kfn.alif_zt .= 0
kfn.alif_synapticActivityCounter .= 0
kfn.alif_synapseReconnectDelayCounter .= 0
kfn.on_vt .= 0
kfn.on_epsilonRec .= 0
@@ -77,7 +77,7 @@ function (kfn::kfn_1)(input::AbstractArray)
kfn.lif_exInType,
kfn.lif_wRecChange,
kfn.lif_neuronInactivityCounter,
kfn.lif_synapticActivityCounter,
kfn.lif_synapseReconnectDelayCounter,
)
end
@async begin
@@ -103,7 +103,7 @@ function (kfn::kfn_1)(input::AbstractArray)
kfn.alif_exInType,
kfn.alif_wRecChange,
kfn.alif_neuronInactivityCounter,
kfn.alif_synapticActivityCounter,
kfn.alif_synapseReconnectDelayCounter,
kfn.alif_epsilonRecA,
kfn.alif_a,
kfn.alif_avth,
@@ -147,7 +147,7 @@ function (kfn::kfn_1)(input::AbstractArray)
)
# get on_zt4d to on_zt
kfn.on_zt .= reduce(max, kfn.on_zt4d, dims=(1,2))
logit = reshape(kfn.on_zt, (size(input, 1), :))
logit = reshape(kfn.on_zt, (size(input, 1), :)) # (outputNeurons, batch)
return logit,
kfn.zit
@@ -171,7 +171,7 @@ function lifForward( zit::CuArray,
exInType::CuArray,
wRecChange::CuArray,
neuronInactivityCounter::CuArray,
synapticActivityCounter::CuArray,
synapseReconnectDelayCounter::CuArray,
)
kernel = @cuda launch=false lifForward( zit,
@@ -191,7 +191,7 @@ function lifForward( zit::CuArray,
exInType,
wRecChange,
neuronInactivityCounter,
synapticActivityCounter,
synapseReconnectDelayCounter,
GeneralUtils.linear_to_cartesian,
)
config = launch_configuration(kernel.fun)
@@ -225,7 +225,7 @@ function lifForward( zit::CuArray,
exInType,
wRecChange,
neuronInactivityCounter,
synapticActivityCounter,
synapseReconnectDelayCounter,
GeneralUtils.linear_to_cartesian; threads, blocks)
end
end
@@ -248,7 +248,7 @@ function lifForward( zit,
exInType,
wRecChange,
neuronInactivityCounter,
synapticActivityCounter,
synapseReconnectDelayCounter,
linear_to_cartesian,
)
i = (blockIdx().x - 1) * blockDim().x + threadIdx().x # gpu threads index
@@ -300,9 +300,9 @@ function lifForward( zit,
# count synaptic inactivity
if !iszero(wRec[i1,i2,i3,i4]) # check if this is wRec subscription
if !iszero(zit[i1,i2,i3,i4]) # synapse is active
synapticActivityCounter[i1,i2,i3,i4] += 1
synapseReconnectDelayCounter[i1,i2,i3,i4] += 1
else # synapse is inactive
synapticActivityCounter[i1,i2,i3,i4] += 0
synapseReconnectDelayCounter[i1,i2,i3,i4] += 0
end
end
# voltage regulator
@@ -331,7 +331,7 @@ function alifForward( zit::CuArray,
exInType::CuArray,
wRecChange::CuArray,
neuronInactivityCounter::CuArray,
synapticActivityCounter::CuArray,
synapseReconnectDelayCounter::CuArray,
epsilonRecA::CuArray,
a::CuArray,
avth::CuArray,
@@ -356,7 +356,7 @@ function alifForward( zit::CuArray,
exInType,
wRecChange,
neuronInactivityCounter,
synapticActivityCounter,
synapseReconnectDelayCounter,
epsilonRecA,
a,
avth,
@@ -394,7 +394,7 @@ function alifForward( zit::CuArray,
exInType,
wRecChange,
neuronInactivityCounter,
synapticActivityCounter,
synapseReconnectDelayCounter,
epsilonRecA,
a,
avth,
@@ -422,7 +422,7 @@ function alifForward( zit,
exInType,
wRecChange,
neuronInactivityCounter,
synapticActivityCounter,
synapseReconnectDelayCounter,
epsilonRecA,
a,
avth,
@@ -493,9 +493,9 @@ function alifForward( zit,
# count synaptic inactivity
if !iszero(wRec[i1,i2,i3,i4]) # check if this is wRec subscription
if !iszero(zit[i1,i2,i3,i4]) # synapse is active
synapticActivityCounter[i1,i2,i3,i4] += 1
synapseReconnectDelayCounter[i1,i2,i3,i4] += 1
else # synapse is inactive
synapticActivityCounter[i1,i2,i3,i4] += 0
synapseReconnectDelayCounter[i1,i2,i3,i4] += 0
end
end
# voltage regulator

View File

@@ -267,30 +267,32 @@ end
function learn!(kfn::kfn_1, device=cpu)
# lif learn
kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapticActivityCounter =
kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapseReconnectDelayCounter =
lifLearn(kfn.lif_wRec,
kfn.lif_exInType,
kfn.lif_wRecChange,
kfn.lif_exInType,
kfn.lif_arrayProjection4d,
kfn.lif_neuronInactivityCounter,
kfn.lif_synapticActivityCounter,
kfn.lif_synapseReconnectDelayCounter,
kfn.lif_synapseConnectionNumber,
kfn.lif_synapticWChangeCounter,
kfn.lif_eta,
kfn.lif_vt,
kfn.zitCumulative,
device)
# alif learn
kfn.alif_wRec, kfn.alif_neuronInactivityCounter, kfn.alif_synapticActivityCounter =
kfn.alif_wRec, kfn.alif_neuronInactivityCounter, kfn.alif_synapseReconnectDelayCounter =
alifLearn(kfn.alif_wRec,
kfn.alif_exInType,
kfn.alif_wRecChange,
kfn.alif_exInType,
kfn.alif_arrayProjection4d,
kfn.alif_neuronInactivityCounter,
kfn.alif_synapticActivityCounter,
kfn.alif_synapseReconnectDelayCounter,
kfn.alif_synapseConnectionNumber,
kfn.alif_synapticWChangeCounter,
kfn.alif_eta,
kfn.alif_vt,
kfn.zitCumulative,
device)
@@ -306,146 +308,178 @@ function learn!(kfn::kfn_1, device=cpu)
# error("DEBUG -> kfn learn! $(Dates.now())")
end
# function lifLearn(wRec,
# exInType,
# wRecChange,
# arrayProjection4d,
# neuronInactivityCounter,
# synapseReconnectDelayCounter,
# synapseConnectionNumber,
# synapticWChangeCounter, #TODO
# eta,
# zitCumulative,
# device)
# # merge learning weight with average learning weight of all batch
# wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
# wRec .= (exInType .* wRec) .+ wch
# arrayProjection4d_cpu = arrayProjection4d |> cpu
# wRec_cpu = wRec |> cpu
# wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
# eta_cpu = eta |> cpu
# eta_cpu = eta_cpu[:,:,:,1]
# neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
# neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
# synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter |> cpu
# synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu[:,:,:,1]
# zitCumulative_cpu = zitCumulative |> cpu
# zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
# # -W if less than 10% of repeat avg, +W otherwise
# _, _, i3 = size(wRec_cpu)
# for i in 1:i3
# x = 0.1 * (sum(synapseReconnectDelayCounter[:,:,i]) / length(synapseReconnectDelayCounter[:,:,i]))
# mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
# wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
# end
# # weak / negative synaptic connection will get randomed in neuroplasticity()
# wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
# # neuroplasticity, work on CPU side
# wRec_cpu = neuroplasticity(synapseConnectionNumber,
# zitCumulative_cpu,
# wRec_cpu,
# neuronInactivityCounter_cpu,
# synapseReconnectDelayCounter_cpu)
# wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
# wRec = wRec_cpu |> device
# neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
# neuronInactivityCounter = neuronInactivityCounter_cpu |> device
# synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu .* arrayProjection4d_cpu
# synapseReconnectDelayCounter = synapseReconnectDelayCounter_cpu |> device
# return wRec, neuronInactivityCounter, synapseReconnectDelayCounter
# end
# Apply the batched LIF recurrent-weight update, run the CPU-side plasticity
# pass, and push results back to `device`.
# NOTE(review): this span reads like an old/new diff merged into one body:
# the parameter `exInType` appears twice (a syntax error as written), both
# `synapticActivityCounter` and `synapseReconnectDelayCounter` are threaded
# through, `zitCumulative` is sliced twice, and `neuroplasticity` is called
# twice with different argument lists. Reconcile before use.
function lifLearn(wRec,
                  exInType,
                  wRecChange,
                  exInType,                     # NOTE(review): duplicate parameter name
                  arrayProjection4d,
                  neuronInactivityCounter,
                  synapticActivityCounter,
                  synapseReconnectDelayCounter,
                  synapseConnectionNumber,
                  synapticWChangeCounter, #TODO
                  eta,
                  vt,
                  zitCumulative,
                  device)
    # merge learning weight with average learning weight of all batch
    wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
    wRec .= (exInType .* wRec) .+ wch
    # transfer data to cpu
    arrayProjection4d_cpu = arrayProjection4d |> cpu
    wRec_cpu = wRec |> cpu
    wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
    wRecChange_cpu = wRecChange |> cpu
    wRecChange_cpu = wRecChange_cpu[:,:,:,1]
    eta_cpu = eta |> cpu
    eta_cpu = eta_cpu[:,:,:,1]
    neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
    synapticActivityCounter_cpu = synapticActivityCounter |> cpu
    synapticActivityCounter_cpu = synapticActivityCounter_cpu[:,:,:,1]
    synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter |> cpu
    synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu[:,:,:,1]
    zitCumulative_cpu = zitCumulative |> cpu
    zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
    zitCumulative_cpu = zitCumulative_cpu[:,:,1] # NOTE(review): redundant repeat of the slice above
    # -W if less than 10% of repeat avg, +W otherwise
    _, _, i3 = size(wRec_cpu)
    for i in 1:i3
        # NOTE(review): sums the device-side array, not the _cpu slice — confirm intent
        x = 0.1 * (sum(synapticActivityCounter[:,:,i]) / length(synapticActivityCounter[:,:,i]))
        mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
        wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
    end
    #TODO neuroplasticity, work on CPU side
    wRec_cpu, neuronInactivityCounter_cpu, synapseReconnectDelayCounter_cpu,
    = neuroplasticity(synapseConnectionNumber,
                      zitCumulative_cpu,
                      wRec_cpu,
                      wRecChange_cpu,
                      vt,
                      neuronInactivityCounter_cpu,
                      synapseReconnectDelayCounter_cpu)
    # weak / negative synaptic connection will get randomed in neuroplasticity()
    wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
    # # merge learning weight with average learning weight of all batch
    # wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
    # wRec .= (exInType .* wRec) .+ wch
    # # (row, col)
    # # -W if less than 10% of repeat avg, +W otherwise
    # _, _, i3 = size(wRec_cpu)
    # for i in 1:i3
    # x = 0.1 * (sum(synapseReconnectDelayCounter[:,:,i]) / length(synapseReconnectDelayCounter[:,:,i]))
    # mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
    # wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
    # end
    # # weak / negative synaptic connection will get randomed in neuroplasticity()
    # wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
    # neuroplasticity, work on CPU side
    # NOTE(review): second neuroplasticity call overwrites the result of the
    # first one above — confirm which call is intended to survive.
    wRec_cpu = neuroplasticity(synapseConnectionNumber,
                               zitCumulative_cpu,
                               wRec_cpu,
                               neuronInactivityCounter_cpu,
                               synapticActivityCounter_cpu)
    # wRec_cpu = neuroplasticity(synapseConnectionNumber,
    #                            zitCumulative_cpu,
    #                            wRec_cpu,
    #                            wRecChange_cpu,
    #                            vt,
    #                            neuronInactivityCounter_cpu,
    #                            synapseReconnectDelayCounter_cpu)
    # transfer data back to gpu
    wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
    wRec = wRec_cpu |> device
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
    neuronInactivityCounter = neuronInactivityCounter_cpu |> device
    synapticActivityCounter_cpu = synapticActivityCounter_cpu .* arrayProjection4d_cpu
    synapticActivityCounter = synapticActivityCounter_cpu |> device
    synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu .* arrayProjection4d_cpu
    synapseReconnectDelayCounter = synapseReconnectDelayCounter_cpu |> device
    return wRec, neuronInactivityCounter, synapticActivityCounter
end
# Apply the batched ALIF recurrent-weight update, run the CPU-side plasticity
# pass, and push results back to `device`.
# NOTE(review): older variant using `synapticActivityCounter`; a sibling
# `alifLearn` in this dump uses `synapseReconnectDelayCounter` and takes a
# `vt` argument instead.
function alifLearn(wRec,
                   exInType,
                   wRecChange,
                   arrayProjection4d,
                   neuronInactivityCounter,
                   synapticActivityCounter,
                   synapseConnectionNumber,
                   synapticWChangeCounter, #TODO
                   eta,
                   zitCumulative,
                   device)
    # merge learning weight with average learning weight of all batch
    wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
    wRec .= (exInType .* wRec) .+ wch
    # move working copies to the CPU, dropping the batch dimension
    arrayProjection4d_cpu = arrayProjection4d |> cpu
    wRec_cpu = wRec |> cpu
    wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
    eta_cpu = eta |> cpu
    eta_cpu = eta_cpu[:,:,:,1]
    neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
    synapticActivityCounter_cpu = synapticActivityCounter |> cpu
    synapticActivityCounter_cpu = synapticActivityCounter_cpu[:,:,:,1]
    zitCumulative_cpu = zitCumulative |> cpu
    zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
    # -W if less than 10% of repeat avg, +W otherwise
    _, _, i3 = size(wRec_cpu)
    for i in 1:i3
        # NOTE(review): averages the device-side array, not the _cpu slice — confirm intent
        x = 0.1 * (sum(synapticActivityCounter[:,:,i]) / length(synapticActivityCounter[:,:,i]))
        mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
        wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
    end
    # weak / negative synaptic connection will get randomed in neuroplasticity()
    wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
    # neuroplasticity, work on CPU side
    wRec_cpu = neuroplasticity(synapseConnectionNumber,
                               zitCumulative_cpu,
                               wRec_cpu,
                               neuronInactivityCounter_cpu,
                               synapticActivityCounter_cpu)
    # mask by the projection and move results back to the device
    wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
    wRec = wRec_cpu |> device
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
    neuronInactivityCounter = neuronInactivityCounter_cpu |> device
    synapticActivityCounter_cpu = synapticActivityCounter_cpu .* arrayProjection4d_cpu
    synapticActivityCounter = synapticActivityCounter_cpu |> device
    # error("DEBUG -> alifLearn! $(Dates.now())")
    return wRec, neuronInactivityCounter, synapticActivityCounter
end
"""
    onLearn!(wOut, wOutChange, arrayProjection4d)

Merge the batch-averaged output-weight change into `wOut` in place, masked by
`arrayProjection4d`, then apply a small multiplicative decay (`c_decay`).
Returns the mutated `wOut`.
"""
function onLearn!(wOut,
                  wOutChange,
                  arrayProjection4d)
    # merge learning weight with average learning weight
    wOut .+= (sum(wOutChange, dims=4) ./ (size(wOut, 4))) .* arrayProjection4d
    # adaptive wOut to help convergence using c_decay
    wOut .-= 0.001 .* wOut
    #TODO synaptic strength
    #TODO neuroplasticity
    # BUG FIX: previously returned `wRec, neuronInactivityCounter,
    # synapseReconnectDelayCounter`, none of which are defined in this scope
    # (UndefVarError at runtime). Return the mutated output weights instead,
    # matching the sibling onLearn! definition.
    return wOut
end
#TODO
function neuroplasticity(synapseConnectionNumber,
zitCumulative, # (row, col)
wRec, # (row, col, n)
wRecChange,
vt,
neuronInactivityCounter,
synapticActivityCounter) # (row, col, n)
synapseReconnectDelayCounter) # (row, col, n)
i1,i2,i3 = size(wRec)
# merge weight
# adjust weight based on vt progress and repetition (90% +w, 10% -w)
# -w all non-fire connection except mature connection
# prune weak connection
# rewire synapse connection
# for each neuron, find total number of synaptic conn that should draw
# new connection to firing and non-firing neurons pool
subToFireNeuron_toBe = Int(floor(0.7 * synapseConnectionNumber))
@@ -460,7 +494,7 @@ function neuroplasticity(synapseConnectionNumber,
println("neuroplasticity, from $(synapseConnectionNumber*size(totalNewConn, 3)) conn, $(sum(totalNewConn)) are replaced")
# clear -1.0 marker
GeneralUtils.replaceElements!(wRec, -1.0, synapticActivityCounter, -0.99)
GeneralUtils.replaceElements!(wRec, -1.0, synapseReconnectDelayCounter, -0.99)
GeneralUtils.replaceElements!(wRec, -1.0, 0.0) # -1.0 marker is no longer required
for i in 1:i3
@@ -473,7 +507,7 @@ function neuroplasticity(synapseConnectionNumber,
a = similar(w) .= -0.99 # synapseConnectionNumber of this neuron
mask = (!iszero).(w)
GeneralUtils.replaceElements!(mask, 1, a, 0)
synapticActivityCounter[:,:,i] = a
synapseReconnectDelayCounter[:,:,i] = a
else
remaining = 0
if subToFireNeuron_current[1,1,i] < subToFireNeuron_toBe
@@ -482,7 +516,7 @@ function neuroplasticity(synapseConnectionNumber,
# add new conn to firing neurons pool
remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
@view(wRec[:,:,i]),
@view(synapticActivityCounter[:,:,i]),
@view(synapseReconnectDelayCounter[:,:,i]),
toAddConn)
totalNewConn[1,1,i] += remaining
end
@@ -490,12 +524,12 @@ function neuroplasticity(synapseConnectionNumber,
# add new conn to non-firing neurons pool
remaining = addNewSynapticConn!(zitMask[:,:,i], 0,
@view(wRec[:,:,i]),
@view(synapticActivityCounter[:,:,i]),
@view(synapseReconnectDelayCounter[:,:,i]),
totalNewConn[1,1,i])
if remaining > 0 # final get-all round if somehow non-firing pool has not enough slot
remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
@view(wRec[:,:,i]),
@view(synapticActivityCounter[:,:,i]),
@view(synapseReconnectDelayCounter[:,:,i]),
remaining)
end
end
@@ -505,6 +539,146 @@ function neuroplasticity(synapseConnectionNumber,
return wRec
end
# Apply the batched ALIF recurrent-weight update, run the CPU-side plasticity
# pass, and push results back to `device`.
# NOTE(review): `vt` is accepted but never used in this body — presumably meant
# for the new progress-based neuroplasticity; confirm.
function alifLearn(wRec,
                   wRecChange,
                   exInType,
                   arrayProjection4d,
                   neuronInactivityCounter,
                   synapseReconnectDelayCounter,
                   synapseConnectionNumber,
                   synapticWChangeCounter, #TODO
                   eta,
                   vt,
                   zitCumulative,
                   device)
    # merge learning weight with average learning weight of all batch
    wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
    wRec .= (exInType .* wRec) .+ wch
    # move working copies to the CPU, dropping the batch dimension
    arrayProjection4d_cpu = arrayProjection4d |> cpu
    wRec_cpu = wRec |> cpu
    wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
    eta_cpu = eta |> cpu
    eta_cpu = eta_cpu[:,:,:,1]
    neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
    synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter |> cpu
    synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu[:,:,:,1]
    zitCumulative_cpu = zitCumulative |> cpu
    zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
    # -W if less than 10% of repeat avg, +W otherwise
    _, _, i3 = size(wRec_cpu)
    for i in 1:i3
        # NOTE(review): averages the device-side array, not the _cpu slice — confirm intent
        x = 0.1 * (sum(synapseReconnectDelayCounter[:,:,i]) / length(synapseReconnectDelayCounter[:,:,i]))
        mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
        wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
    end
    # weak / negative synaptic connection will get randomed in neuroplasticity()
    wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
    # neuroplasticity, work on CPU side
    # NOTE(review): this 5-argument call does not match the active 7-parameter
    # neuroplasticity (which also takes wRecChange and vt) — confirm which
    # signature is current.
    wRec_cpu = neuroplasticity(synapseConnectionNumber,
                               zitCumulative_cpu,
                               wRec_cpu,
                               neuronInactivityCounter_cpu,
                               synapseReconnectDelayCounter_cpu)
    # mask by the projection and move results back to the device
    wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
    wRec = wRec_cpu |> device
    neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
    neuronInactivityCounter = neuronInactivityCounter_cpu |> device
    synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu .* arrayProjection4d_cpu
    synapseReconnectDelayCounter = synapseReconnectDelayCounter_cpu |> device
    # error("DEBUG -> alifLearn! $(Dates.now())")
    return wRec, neuronInactivityCounter, synapseReconnectDelayCounter
end
function onLearn!(wOut,
                  wOutChange,
                  arrayProjection4d)
    # Fold the batch-averaged output-weight update into wOut in place, masked
    # by the projection array.
    batchCount = size(wOut, 4)
    meanChange = sum(wOutChange, dims=4) ./ batchCount
    wOut .+= meanChange .* arrayProjection4d
    # Small multiplicative decay (c_decay) on wOut to aid convergence.
    wOut .-= 0.001 .* wOut
end
# function neuroplasticity(synapseConnectionNumber,
# zitCumulative, # (row, col)
# wRec, # (row, col, n)
# neuronInactivityCounter,
# synapseReconnectDelayCounter) # (row, col, n)
# i1,i2,i3 = size(wRec)
# # for each neuron, find total number of synaptic conn that should draw
# # new connection to firing and non-firing neurons pool
# subToFireNeuron_toBe = Int(floor(0.7 * synapseConnectionNumber))
# # for each neuron, count how many synap already subscribed to firing-neurons
# zw = zitCumulative .* wRec
# subToFireNeuron_current = sum(GeneralUtils.isBetween.(zw, 0.0, 100.0), dims=(1,2)) # (1, 1, n)
# zitMask = (!iszero).(zitCumulative) # zitMask of firing neurons = 1, non-firing = 0
# projection = ones(i1,i2,i3)
# zitMask = zitMask .* projection # (row, col, n)
# totalNewConn = sum(isequal.(wRec, -1.0), dims=(1,2)) # count new conn mark (-1.0), (1, 1, n)
# println("neuroplasticity, from $(synapseConnectionNumber*size(totalNewConn, 3)) conn, $(sum(totalNewConn)) are replaced")
# # clear -1.0 marker
# GeneralUtils.replaceElements!(wRec, -1.0, synapseReconnectDelayCounter, -0.99)
# GeneralUtils.replaceElements!(wRec, -1.0, 0.0) # -1.0 marker is no longer required
# for i in 1:i3
# if neuronInactivityCounter[1:1:i][1] < -10000 # neuron die i.e. reset all weight
# println("neuron die")
# neuronInactivityCounter[:,:,i] .= 0 # reset
# w = random_wRec(i1,i2,1,synapseConnectionNumber)
# wRec[:,:,i] .= w
# a = similar(w) .= -0.99 # synapseConnectionNumber of this neuron
# mask = (!iszero).(w)
# GeneralUtils.replaceElements!(mask, 1, a, 0)
# synapseReconnectDelayCounter[:,:,i] = a
# else
# remaining = 0
# if subToFireNeuron_current[1,1,i] < subToFireNeuron_toBe
# toAddConn = subToFireNeuron_toBe - subToFireNeuron_current[1,1,i]
# totalNewConn[1,1,i] = totalNewConn[1,1,i] - toAddConn
# # add new conn to firing neurons pool
# remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
# @view(wRec[:,:,i]),
# @view(synapseReconnectDelayCounter[:,:,i]),
# toAddConn)
# totalNewConn[1,1,i] += remaining
# end
# # add new conn to non-firing neurons pool
# remaining = addNewSynapticConn!(zitMask[:,:,i], 0,
# @view(wRec[:,:,i]),
# @view(synapseReconnectDelayCounter[:,:,i]),
# totalNewConn[1,1,i])
# if remaining > 0 # final get-all round if somehow non-firing pool has not enough slot
# remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
# @view(wRec[:,:,i]),
# @view(synapseReconnectDelayCounter[:,:,i]),
# remaining)
# end
# end
# end
# # error("DEBUG -> neuroplasticity $(Dates.now())")
# return wRec
# end
# learningLiquidity(x) = -0.0001x + 1 # -10000 to +10000; f(x) = -5e-05x+0.5
function learningLiquidity(x)

View File

@@ -23,7 +23,7 @@ Base.@kwdef mutable struct kfn_1 <: knowledgeFn
timeStep::Union{AbstractArray, Nothing} = nothing
learningStage::Union{AbstractArray, Nothing} = nothing # 0 inference, 1 start, 2 during, 3 end learning
inputSize::Union{AbstractArray, Nothing} = nothing
zit::Union{AbstractArray, Nothing} = nothing # 3D activation matrix
zit::Union{AbstractArray, Nothing} = nothing # RSNN 3D activation matrix (row, col, batch)
zitCumulative::Union{AbstractArray, Nothing} = nothing
exInType::Union{AbstractArray, Nothing} = nothing
modelError::Union{AbstractArray, Nothing} = nothing # store RSNN error
@@ -58,7 +58,7 @@ Base.@kwdef mutable struct kfn_1 <: knowledgeFn
lif_firingCounter::Union{AbstractArray, Nothing} = nothing
lif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
lif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
lif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
lif_synapseReconnectDelayCounter::Union{AbstractArray, Nothing} = nothing
lif_synapseConnectionNumber::Union{Int, Nothing} = nothing
lif_synapticWChangeCounter::Union{AbstractArray, Nothing} = nothing
@@ -99,7 +99,7 @@ Base.@kwdef mutable struct kfn_1 <: knowledgeFn
alif_firingCounter::Union{AbstractArray, Nothing} = nothing
alif_firingTargetFrequency::Union{AbstractArray, Nothing} = nothing
alif_neuronInactivityCounter::Union{AbstractArray, Nothing} = nothing
alif_synapticActivityCounter::Union{AbstractArray, Nothing} = nothing
alif_synapseReconnectDelayCounter::Union{AbstractArray, Nothing} = nothing
alif_synapseConnectionNumber::Union{Int, Nothing} = nothing
alif_synapticWChangeCounter::Union{AbstractArray, Nothing} = nothing
@@ -230,15 +230,15 @@ function kfn_1(params::Dict; device=cpu)
# count subscribed synapse activity, just like epsilonRec but without decay.
# use to adjust weight based on how often neural pathway is used
kfn.lif_synapticActivityCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -0.99 for non-sub conn
kfn.lif_synapseReconnectDelayCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -0.99 for non-sub conn
mask = Array((!iszero).(kfn.lif_wRec))
# initial value subscribed conn, synapticActivityCounter range -10000 to +10000
GeneralUtils.replaceElements!(mask, 1, kfn.lif_synapticActivityCounter, 0)
kfn.lif_synapticActivityCounter = kfn.lif_synapticActivityCounter |> device
# initial value subscribed conn, synapseReconnectDelayCounter range -10000 to +10000
GeneralUtils.replaceElements!(mask, 1, kfn.lif_synapseReconnectDelayCounter, 0)
kfn.lif_synapseReconnectDelayCounter = kfn.lif_synapseReconnectDelayCounter |> device
kfn.lif_synapticWChangeCounter = Array(similar(kfn.lif_wRec) .= -0.99) # -0.99 for non-sub conn
mask = Array((!iszero).(kfn.lif_wRec))
# initial value subscribed conn, synapticActivityCounter range -10000 to +10000
# initial value subscribed conn, synapseReconnectDelayCounter range -10000 to +10000
GeneralUtils.replaceElements!(mask, 1, kfn.lif_synapticWChangeCounter, 1.0)
kfn.lif_synapticWChangeCounter = kfn.lif_synapticWChangeCounter |> device
@@ -285,14 +285,14 @@ function kfn_1(params::Dict; device=cpu)
kfn.alif_firingCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_firingTargetFrequency = (similar(kfn.alif_wRec) .= 0.1)
kfn.alif_neuronInactivityCounter = (similar(kfn.alif_wRec) .= 0)
kfn.alif_synapticActivityCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -9 for non-sub conn
kfn.alif_synapseReconnectDelayCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -9 for non-sub conn
mask = Array((!iszero).(kfn.alif_wRec))
# initial value subscribed conn, synapticActivityCounter range -10000 to +10000
GeneralUtils.replaceElements!(mask, 1, kfn.alif_synapticActivityCounter, 0)
kfn.alif_synapticActivityCounter = kfn.alif_synapticActivityCounter |> device
# initial value subscribed conn, synapseReconnectDelayCounter range -10000 to +10000
GeneralUtils.replaceElements!(mask, 1, kfn.alif_synapseReconnectDelayCounter, 0)
kfn.alif_synapseReconnectDelayCounter = kfn.alif_synapseReconnectDelayCounter |> device
kfn.alif_synapticWChangeCounter = Array(similar(kfn.alif_wRec) .= -0.99) # -9 for non-sub conn
mask = Array((!iszero).(kfn.alif_wRec))
# initial value subscribed conn, synapticActivityCounter range -10000 to +10000
# initial value subscribed conn, synapseReconnectDelayCounter range -10000 to +10000
GeneralUtils.replaceElements!(mask, 1, kfn.alif_synapticWChangeCounter, 1.0)
kfn.alif_synapticWChangeCounter = kfn.alif_synapticWChangeCounter |> device