Files
IronpenGPU/src/learn.jl
2023-09-10 11:28:40 +07:00

733 lines
29 KiB
Julia

module learn
export learn!, compute_paramsChange!
using Statistics, Random, LinearAlgebra, JSON3, Flux, CUDA, Dates
using GeneralUtils
using ..type, ..snnUtil
#------------------------------------------------------------------------------------------------100
# Accumulate weight-change buffers for every population of a kfn_1 model.
# `modelError` is the summed model-level error fed back to the recurrent (LIF /
# ALIF) neurons; `outputError` is each output neuron's own error.
# Each helper mutates the corresponding change buffer (lif_wRecChange,
# alif_wRecChange, on_wOutChange) and resets the eligibility traces it consumed
# (epsilonRec, and epsilonRecA for ALIF).
function compute_paramsChange!(kfn::kfn_1, modelError, outputError)
# LIF population: learning signal derived from modelError via on_wOut feedback.
lifComputeParamsChange!(kfn.timeStep,
kfn.lif_phi,
kfn.lif_epsilonRec,
kfn.lif_eta,
kfn.lif_eRec,
kfn.lif_wRec,
kfn.lif_exInType,
kfn.lif_wRecChange,
kfn.on_wOut,
kfn.lif_firingCounter,
kfn.lif_firingTargetFrequency,
kfn.lif_arrayProjection4d,
kfn.lif_error,
modelError,
kfn.inputSize,
)
# ALIF population: same as LIF plus the adaptation trace (epsilonRecA, beta).
alifComputeParamsChange!(kfn.timeStep,
kfn.alif_phi,
kfn.alif_epsilonRec,
kfn.alif_eta,
kfn.alif_eRec,
kfn.alif_wRec,
kfn.alif_exInType,
kfn.alif_wRecChange,
kfn.on_wOut,
kfn.alif_firingCounter,
kfn.alif_firingTargetFrequency,
kfn.alif_arrayProjection4d,
kfn.alif_error,
modelError,
kfn.alif_epsilonRecA,
kfn.alif_beta,
)
# Output neurons: each one learns directly from its own outputError.
onComputeParamsChange!(kfn.on_phi,
kfn.on_epsilonRec,
kfn.on_eta,
kfn.on_eRec,
kfn.on_wOutChange,
kfn.on_arrayProjection4d,
kfn.on_error,
outputError,
)
# error("DEBUG -> kfn compute_paramsChange! $(Dates.now())")
end
# LIF recurrent-weight update, GPU path.
# Builds the learning signal (`nError`) by fanning `modelError` back through
# each neuron's total output weight, multiplies it with the eligibility trace
# (`eRec = phi .* epsilonRec`), and accumulates the result into `wRecChange`
# together with a firing-frequency regularizer.
# Mutates: nError, eRec, wRecChange; resets epsilonRec to 0.
function lifComputeParamsChange!( timeStep::CuArray,
                phi::CuArray,
                epsilonRec::CuArray,
                eta::CuArray,
                eRec::CuArray,
                wRec::CuArray,
                exInType::CuArray,
                wRecChange::CuArray,
                wOut::CuArray,
                firingCounter::CuArray,
                firingTargetFrequency::CuArray,
                arrayProjection4d::CuArray,
                nError::CuArray,
                modelError::CuArray,
                inputSize::CuArray,
                )
# Bₖⱼ in paper, sum() to get each neuron's total wOut weight,
# use absolute because only magnitude is needed
wOutSum_all = reshape( abs.(sum(wOut, dims=3)), (1,1,:, size(wOut, 4)) ) # (1,1,allNeuron,batch)
# keep only the LIF neurons' slice: they sit immediately after the input neurons
startIndex = prod(inputSize) +1
stopIndex = startIndex + size(wRec, 3) -1
wOutSum = @view(wOutSum_all[1,1, startIndex:stopIndex, :])
wOutSum = reshape(wOutSum, (1, 1, size(wOutSum, 1), size(wOutSum, 2))) # (1,1,n,batch)
# nError a.k.a. learning signal uses the dopamine concept:
# this neuron receives the summed error signal (modelError),
# masked by the projection so disconnected synapses receive nothing
nError .= (modelError .* wOutSum) .* arrayProjection4d
# eligibility trace: pseudo-derivative phi times the recurrent trace
eRec .= phi .* epsilonRec
wRecChange .+= (-eta .* nError .* eRec)
# frequency regulator; fully dotted so the whole expression fuses into a single
# broadcast kernel (the previous non-broadcast `-` allocated a temporary array)
wRecChange .+= 0.001 .* ((firingTargetFrequency .- (firingCounter ./ timeStep)) ./ timeStep) .*
eta .* eRec
# consume the eligibility trace for the next accumulation window
epsilonRec .= 0
end
# ALIF recurrent-weight update, GPU path.
# Same scheme as the LIF update, but the eligibility trace subtracts the
# adaptation trace scaled by beta (eq. 25), and the ALIF neurons occupy the
# tail of the all-neuron axis in wOut.
# Mutates: nError, eRec, wRecChange; resets epsilonRec and epsilonRecA to 0.
function alifComputeParamsChange!( timeStep::CuArray,
                phi::CuArray,
                epsilonRec::CuArray,
                eta::CuArray,
                eRec::CuArray,
                wRec::CuArray,
                exInType::CuArray,
                wRecChange::CuArray,
                wOut::CuArray,
                firingCounter::CuArray,
                firingTargetFrequency::CuArray,
                arrayProjection4d::CuArray,
                nError::CuArray,
                modelError::CuArray,
                epsilonRecA::CuArray,
                beta::CuArray
                )
# Bₖⱼ in paper, sum() to get each neuron's total wOut weight,
# use absolute because only magnitude is needed
wOutSum_all = reshape( abs.(sum(wOut, dims=3)), (1,1,:, size(wOut, 4)) ) # (1,1,allNeuron,batch)
# keep only the ALIF neurons' slice (they occupy the end of the neuron axis)
wOutSum = @view(wOutSum_all[1,1, end-size(wRec, 3)+1:end, :])
wOutSum = reshape(wOutSum, (1, 1, size(wOutSum, 1), size(wOutSum, 2))) # (1,1,n,batch)
# nError a.k.a. learning signal uses the dopamine concept:
# this neuron receives the summed error signal (modelError)
nError .= (modelError .* wOutSum) .* arrayProjection4d
# eligibility trace with adaptation, eq. 25
# NOTE(review): the AbstractArray method of this function *adds* the adaptation
# term instead of subtracting it — confirm which sign eq. 25 intends.
eRec .= phi .* (epsilonRec .- (beta .* epsilonRecA))
wRecChange .+= (-eta .* nError .* eRec)
# frequency regulator; `.-` keeps the expression fully fused on the GPU
# (the previous non-broadcast `-` allocated a temporary array)
wRecChange .+= 0.001 .* ((firingTargetFrequency .- (firingCounter ./ timeStep)) ./ timeStep) .*
eta .* eRec
# consume both eligibility traces for the next accumulation window
epsilonRec .= 0
epsilonRecA .= 0
end
# Output-neuron weight update, GPU path.
# Each output neuron learns directly from its own error (`outputError`);
# accumulates the change into wOutChange and resets the eligibility trace.
# Mutates: nError, eRec, wOutChange; resets epsilonRec to 0.
function onComputeParamsChange!(phi::CuArray,
                epsilonRec::CuArray,
                eta::CuArray,
                eRec::CuArray,
                wOutChange::CuArray,
                arrayProjection4d::CuArray,
                nError::CuArray,
                outputError::CuArray # outputError is output neuron's error
                )
# learning signal: lift (out, batch) error onto the 4d layout and mask it
outputError4d = reshape(outputError, (1, 1, :, size(outputError, 2)))
nError .= outputError4d .* arrayProjection4d
# eligibility trace: pseudo-derivative phi times the recurrent trace
eRec .= phi .* epsilonRec
# accumulate the gradient-descent step
wOutChange .+= (-eta .* nError .* eRec)
# consume the eligibility trace for the next accumulation window
epsilonRec .= 0
end
# LIF recurrent-weight update, CPU (AbstractArray) path.
# For each (neuron, batch) pair, accumulates
#   wRecChange += -eta * (phi * epsilonRec) * (modelError * wOutSum[neuron])
# where wOutSum is each neuron's total output weight (Bₖⱼ in the paper).
# NOTE(review): unlike the CuArray method, no abs() is taken over wOut here —
# confirm whether signed output weights are intended on the CPU path.
function lifComputeParamsChange!( phi::AbstractArray,
                epsilonRec::AbstractArray,
                eta::AbstractArray,
                wRec::AbstractArray,
                wRecChange::AbstractArray,
                wOut::AbstractArray,
                modelError::AbstractArray)
    nRow, _, nNeuron, nBatch = size(epsilonRec)
    # Bₖⱼ: per-neuron total output weight, summed over wOut's 3rd axis
    outWeightTotal = reshape(sum(wOut, dims=3), (nRow, :, nBatch))
    for b in 1:nBatch
        # summed error signal ("dopamine") delivered to every neuron of batch b
        batchError = view(modelError, :, b)[1]
        outW = view(outWeightTotal, :, :, b)
        for n in 1:nNeuron
            learnRate = view(eta, :, :, n, b)[1]
            # eligibility trace: pseudo-derivative phi times the recurrent trace
            trace = view(phi, :, :, n, b)[1] .* view(epsilonRec, :, :, n, b)
            # how much this neuron's spiking contributed to the model error
            signal = batchError * outW[n]
            view(wRecChange, :, :, n, b) .+= (-learnRate) .* (trace .* signal)
        end
    end
end
# ALIF recurrent-weight update, CPU (AbstractArray) path.
# Same scheme as the LIF CPU update, but the eligibility trace also includes
# the adaptation trace: eRec = phi*epsilonRec + (phi*beta)*epsilonRecA.
# NOTE(review): the CuArray method combines the adaptation term with a minus
# sign (eq. 25) — confirm which sign is intended; this rewrite preserves the
# original `+` of the CPU path.
function alifComputeParamsChange!( phi::AbstractArray,
                epsilonRec::AbstractArray,
                epsilonRecA::AbstractArray,
                eta::AbstractArray,
                wRec::AbstractArray,
                wRecChange::AbstractArray,
                beta::AbstractArray,
                wOut::AbstractArray,
                modelError::AbstractArray)
    nRow, _, nNeuron, nBatch = size(epsilonRec)
    # Bₖⱼ: per-neuron total output weight, summed over wOut's 3rd axis
    outWeightTotal = reshape(sum(wOut, dims=3), (nRow, :, nBatch))
    for b in 1:nBatch, n in 1:nNeuron
        pseudoDeriv = view(phi, :, :, n, b)[1]
        adaptScale = pseudoDeriv * view(beta, :, :, n, b)[1]
        # eligibility trace: voltage part plus beta-scaled adaptation part
        eligibility = (pseudoDeriv .* view(epsilonRec, :, :, n, b)) .+
                      (adaptScale .* view(epsilonRecA, :, :, n, b))
        # learning signal: summed model error times this neuron's total wOut
        signal = view(modelError, :, b)[1] * view(outWeightTotal, :, :, b)[n]
        view(wRecChange, :, :, n, b) .+= (-view(eta, :, :, n, b)[1]) .* eligibility .* signal
    end
end
# Output-neuron weight update, CPU (AbstractArray) path.
# Each output neuron learns from its own error:
#   wOutChange += -eta * (phi * epsilonRec) * outputError[neuron]
function onComputeParamsChange!(phi::AbstractArray,
                epsilonRec::AbstractArray,
                eta::AbstractArray,
                wOutChange::AbstractArray,
                outputError::AbstractArray)
    _, _, nNeuron, nBatch = size(epsilonRec)
    for b in 1:nBatch, n in 1:nNeuron
        # eligibility trace: pseudo-derivative phi times the recurrent trace
        trace = view(phi, :, :, n, b)[1] .* view(epsilonRec, :, :, n, b)
        # this output neuron's own error (its answer minus the correct answer)
        err = view(outputError, :, b)[n]
        view(wOutChange, :, :, n, b) .+= (-view(eta, :, :, n, b)[1]) .* (trace .* err)
    end
end
# Apply one full learning step to a kfn_1 model.
# Runs LIF and ALIF recurrent-weight learning (each includes CPU-side
# neuroplasticity, then re-uploads state to `device` — Flux's `cpu` by
# default), updates output weights in place, and closes the learning session.
# Mutates kfn fields (weights, inactivity / reconnect-delay counters,
# learningStage).
function learn!(kfn::kfn_1, device=cpu)
# LIF population: returns fresh device arrays that replace the kfn fields.
kfn.lif_wRec, kfn.lif_neuronInactivityCounter, kfn.lif_synapseReconnectDelayCounter =
lifLearn(kfn.lif_wRec,
kfn.lif_wRecChange,
kfn.lif_exInType,
kfn.lif_arrayProjection4d,
kfn.lif_neuronInactivityCounter,
kfn.lif_synapseReconnectDelayCounter,
kfn.lif_synapseConnectionNumber,
kfn.lif_synapticWChangeCounter,
kfn.lif_eta,
kfn.lif_vt,
kfn.zitCumulative,
device)
# ALIF population: same pattern as LIF.
kfn.alif_wRec, kfn.alif_neuronInactivityCounter, kfn.alif_synapseReconnectDelayCounter =
alifLearn(kfn.alif_wRec,
kfn.alif_wRecChange,
kfn.alif_exInType,
kfn.alif_arrayProjection4d,
kfn.alif_neuronInactivityCounter,
kfn.alif_synapseReconnectDelayCounter,
kfn.alif_synapseConnectionNumber,
kfn.alif_synapticWChangeCounter,
kfn.alif_eta,
kfn.alif_vt,
kfn.zitCumulative,
device)
# Output neurons: in-place update of on_wOut.
onLearn!(kfn.on_wOut,
kfn.on_wOutChange,
kfn.on_arrayProjection4d)
# wrap up learning session
# (stage 3 appears to mark the final learning phase; reset to idle (0) —
#  confirm against the stage machine maintained elsewhere)
if kfn.learningStage == [3]
kfn.learningStage = [0]
end
# error("DEBUG -> kfn learn! $(Dates.now())")
end
# function lifLearn(wRec,
# exInType,
# wRecChange,
# arrayProjection4d,
# neuronInactivityCounter,
# synapseReconnectDelayCounter,
# synapseConnectionNumber,
# synapticWChangeCounter, #TODO
# eta,
# zitCumulative,
# device)
# # merge learning weight with average learning weight of all batch
# wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
# wRec .= (exInType .* wRec) .+ wch
# arrayProjection4d_cpu = arrayProjection4d |> cpu
# wRec_cpu = wRec |> cpu
# wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n)
# eta_cpu = eta |> cpu
# eta_cpu = eta_cpu[:,:,:,1]
# neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu
# neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n)
# synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter |> cpu
# synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu[:,:,:,1]
# zitCumulative_cpu = zitCumulative |> cpu
# zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col)
# # -W if less than 10% of repeat avg, +W otherwise
# _, _, i3 = size(wRec_cpu)
# for i in 1:i3
# x = 0.1 * (sum(synapseReconnectDelayCounter[:,:,i]) / length(synapseReconnectDelayCounter[:,:,i]))
# mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
# wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
# end
# # weak / negative synaptic connection will get randomed in neuroplasticity()
# wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0) # mark with -1.0
# # neuroplasticity, work on CPU side
# wRec_cpu = neuroplasticity(synapseConnectionNumber,
# zitCumulative_cpu,
# wRec_cpu,
# neuronInactivityCounter_cpu,
# synapseReconnectDelayCounter_cpu)
# wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
# wRec = wRec_cpu |> device
# neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
# neuronInactivityCounter = neuronInactivityCounter_cpu |> device
# synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu .* arrayProjection4d_cpu
# synapseReconnectDelayCounter = synapseReconnectDelayCounter_cpu |> device
# return wRec, neuronInactivityCounter, synapseReconnectDelayCounter
# end
# LIF learning step.
# Ships per-neuron state to the host (every batch shares the same wRec, so the
# batch dimension is dropped), runs CPU-side neuroplasticity (synapse pruning
# and rewiring), masks the results with the synapse projection and uploads them
# back to `device`.
# Returns (wRec, neuronInactivityCounter, synapseReconnectDelayCounter) as
# device arrays; the projection broadcast restores the batch dimension.
# NOTE(review): unlike alifLearn, the batch-average weight merge here is still
# a TODO — confirm whether neuroplasticity is meant to take over that step.
function lifLearn(wRec,
        wRecChange,
        exInType,
        arrayProjection4d,
        neuronInactivityCounter,
        synapseReconnectDelayCounter,
        synapseConnectionNumber,
        synapticWChangeCounter, #TODO currently unused
        eta,
        vt,
        zitCumulative,
        device)
# transfer data to cpu; keep batch 1 only since every batch holds the same wRec
arrayProjection4d_cpu = arrayProjection4d |> cpu
wRec_cpu = (wRec |> cpu)[:,:,:,1] # (row, col, n)
wRecChange_cpu = (wRecChange |> cpu)[:,:,:,1]
neuronInactivityCounter_cpu = (neuronInactivityCounter |> cpu)[:,:,:,1] # (row, col, n)
synapseReconnectDelayCounter_cpu = (synapseReconnectDelayCounter |> cpu)[:,:,:,1]
zitCumulative_cpu = (zitCumulative |> cpu)[:,:,1] # (row, col)
# neuroplasticity mutates wRec_cpu and both counters in place, so its return
# value is deliberately ignored. (The previous code destructured the single
# returned array into three names, which bound the array's first elements —
# scalars — instead of the updated arrays.)
neuroplasticity(synapseConnectionNumber,
                zitCumulative_cpu,
                wRec_cpu,
                wRecChange_cpu,
                vt,
                neuronInactivityCounter_cpu,
                synapseReconnectDelayCounter_cpu)
# mask to live synapses (re-expands the batch dimension) and upload to device
wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
wRec = wRec_cpu |> device
neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
neuronInactivityCounter = neuronInactivityCounter_cpu |> device
synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu .* arrayProjection4d_cpu
synapseReconnectDelayCounter = synapseReconnectDelayCounter_cpu |> device
return wRec, neuronInactivityCounter, synapseReconnectDelayCounter
end
#TODO
# Synaptic rewiring on the host ("neuroplasticity").
# Replaces synapses previously marked with -1.0, resets neurons whose
# inactivity counter has collapsed, and draws new connections preferentially
# (70%) from the pool of firing neurons.
# Mutates wRec, neuronInactivityCounter and synapseReconnectDelayCounter in
# place AND returns them as a tuple, matching how lifLearn destructures the
# result (the old single-array return made that destructuring bind scalars).
# `wRecChange` and `vt` are accepted for the planned weight-merge / vt-progress
# steps (see TODO below) but are not used yet.
function neuroplasticity(synapseConnectionNumber,
            zitCumulative, # (row, col)
            wRec, # (row, col, n)
            wRecChange, # TODO: not used yet
            vt, # TODO: not used yet
            neuronInactivityCounter,
            synapseReconnectDelayCounter) # (row, col, n)
i1,i2,i3 = size(wRec)
# TODO planned steps: merge weight; adjust weight by vt progress / repetition
# (90% +w, 10% -w); -w all non-firing connections except mature ones; prune
# weak connections; rewire synapse connections.
# for each neuron, target number of synapses that should subscribe to firing neurons
subToFireNeuron_toBe = Int(floor(0.7 * synapseConnectionNumber))
# for each neuron, count synapses already subscribed to firing neurons
zw = zitCumulative .* wRec
subToFireNeuron_current = sum(GeneralUtils.isBetween.(zw, 0.0, 100.0), dims=(1,2)) # (1, 1, n)
zitMask = (!iszero).(zitCumulative) # zitMask of firing neurons = 1, non-firing = 0
projection = ones(i1,i2,i3)
zitMask = zitMask .* projection # broadcast mask to (row, col, n)
totalNewConn = sum(isequal.(wRec, -1.0), dims=(1,2)) # count new-conn marks (-1.0), (1, 1, n)
println("neuroplasticity, from $(synapseConnectionNumber*size(totalNewConn, 3)) conn, $(sum(totalNewConn)) are replaced")
# start the reconnect-delay countdown on marked synapses, then clear the marks
GeneralUtils.replaceElements!(wRec, -1.0, synapseReconnectDelayCounter, -0.99)
GeneralUtils.replaceElements!(wRec, -1.0, 0.0) # -1.0 marker is no longer required
for i in 1:i3
    # per-neuron inactivity check. Fixed index: the old `[1:1:i][1]` always
    # read the array's first element, so only neuron 1 was ever tested.
    if neuronInactivityCounter[1, 1, i] < -10000 # neuron dies i.e. reset all weights
        println("neuron die")
        neuronInactivityCounter[:,:,i] .= 0 # reset
        w = random_wRec(i1,i2,1,synapseConnectionNumber)
        wRec[:,:,i] .= w
        a = fill!(similar(w), -0.99) # delay counter for this neuron's synapses
        mask = (!iszero).(w)
        GeneralUtils.replaceElements!(mask, 1, a, 0)
        synapseReconnectDelayCounter[:,:,i] = a
    else
        remaining = 0
        if subToFireNeuron_current[1,1,i] < subToFireNeuron_toBe
            toAddConn = subToFireNeuron_toBe - subToFireNeuron_current[1,1,i]
            totalNewConn[1,1,i] -= toAddConn
            # draw new connections from the firing-neuron pool first
            remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
                            @view(wRec[:,:,i]),
                            @view(synapseReconnectDelayCounter[:,:,i]),
                            toAddConn)
            totalNewConn[1,1,i] += remaining
        end
        # remaining quota goes to the non-firing pool
        remaining = addNewSynapticConn!(zitMask[:,:,i], 0,
                        @view(wRec[:,:,i]),
                        @view(synapseReconnectDelayCounter[:,:,i]),
                        totalNewConn[1,1,i])
        if remaining > 0 # final get-all round if the non-firing pool ran out of slots
            remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
                            @view(wRec[:,:,i]),
                            @view(synapseReconnectDelayCounter[:,:,i]),
                            remaining)
        end
    end
end
return wRec, neuronInactivityCounter, synapseReconnectDelayCounter
end
# ALIF learning step.
# Merges the batch-averaged accumulated weight change on the GPU, applies the
# repeat-based +W/-W adjustment and weak-synapse marking on the host, runs
# neuroplasticity (pruning / rewiring), then uploads results back to `device`.
# Returns (wRec, neuronInactivityCounter, synapseReconnectDelayCounter) as
# device arrays; the projection broadcast restores the batch dimension.
function alifLearn(wRec,
        wRecChange,
        exInType,
        arrayProjection4d,
        neuronInactivityCounter,
        synapseReconnectDelayCounter,
        synapseConnectionNumber,
        synapticWChangeCounter, #TODO currently unused
        eta,
        vt,
        zitCumulative,
        device)
# merge learning weight with the batch-average accumulated weight change
wch = sum(wRecChange, dims=4) ./ (size(wRec, 4)) .* arrayProjection4d
wRec .= (exInType .* wRec) .+ wch
# transfer data to cpu; keep batch 1 only since every batch holds the same state
arrayProjection4d_cpu = arrayProjection4d |> cpu
wRec_cpu = (wRec |> cpu)[:,:,:,1] # (row, col, n)
wRecChange_cpu = (wRecChange |> cpu)[:,:,:,1]
eta_cpu = (eta |> cpu)[:,:,:,1]
neuronInactivityCounter_cpu = (neuronInactivityCounter |> cpu)[:,:,:,1] # (row, col, n)
synapseReconnectDelayCounter_cpu = (synapseReconnectDelayCounter |> cpu)[:,:,:,1]
zitCumulative_cpu = (zitCumulative |> cpu)[:,:,1] # (row, col)
# -W if less than 10% of repeat avg, +W otherwise
# NOTE(review): the mask compares *weights* against the counter mean —
# confirm this matches the "-W if below 10% of repeat avg" intent.
_, _, i3 = size(wRec_cpu)
for i in 1:i3
    # use the host copy (the old code summed a GPU slice on every iteration)
    x = 0.1 * (sum(synapseReconnectDelayCounter_cpu[:,:,i]) / length(synapseReconnectDelayCounter_cpu[:,:,i]))
    mask = GeneralUtils.replaceLessThan.(wRec_cpu[:,:,i], x, -1, 1)
    wRec_cpu[:,:,i] .+= mask .* eta_cpu[:,:,i] .* wRec_cpu[:,:,i]
end
# mark weak / negative synapses with -1.0 so neuroplasticity re-randomizes them
wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.01, -1.0)
# neuroplasticity (CPU side) mutates wRec_cpu and both counters in place, so
# its return value is ignored. The call now matches the active 7-argument
# method — the previous 5-argument call had no matching method (MethodError).
neuroplasticity(synapseConnectionNumber,
                zitCumulative_cpu,
                wRec_cpu,
                wRecChange_cpu,
                vt,
                neuronInactivityCounter_cpu,
                synapseReconnectDelayCounter_cpu)
# mask to live synapses (re-expands the batch dimension) and upload to device
wRec_cpu = wRec_cpu .* arrayProjection4d_cpu
wRec = wRec_cpu |> device
neuronInactivityCounter_cpu = neuronInactivityCounter_cpu .* arrayProjection4d_cpu
neuronInactivityCounter = neuronInactivityCounter_cpu |> device
synapseReconnectDelayCounter_cpu = synapseReconnectDelayCounter_cpu .* arrayProjection4d_cpu
synapseReconnectDelayCounter = synapseReconnectDelayCounter_cpu |> device
return wRec, neuronInactivityCounter, synapseReconnectDelayCounter
end
# Output-weight learning step.
# Adds the batch-averaged accumulated change (masked by the projection) to
# wOut in place, then applies a small decay (c_decay) to help convergence.
function onLearn!(wOut,
        wOutChange,
        arrayProjection4d)
    batchSize = size(wOut, 4)
    # batch-averaged accumulated change, masked to live synapses
    meanChange = sum(wOutChange, dims=4) ./ batchSize
    wOut .+= meanChange .* arrayProjection4d
    # weight decay nudges wOut toward zero (adaptive c_decay)
    wOut .-= 0.001 .* wOut
end
# function neuroplasticity(synapseConnectionNumber,
# zitCumulative, # (row, col)
# wRec, # (row, col, n)
# neuronInactivityCounter,
# synapseReconnectDelayCounter) # (row, col, n)
# i1,i2,i3 = size(wRec)
# # for each neuron, find total number of synaptic conn that should draw
# # new connection to firing and non-firing neurons pool
# subToFireNeuron_toBe = Int(floor(0.7 * synapseConnectionNumber))
# # for each neuron, count how many synap already subscribed to firing-neurons
# zw = zitCumulative .* wRec
# subToFireNeuron_current = sum(GeneralUtils.isBetween.(zw, 0.0, 100.0), dims=(1,2)) # (1, 1, n)
# zitMask = (!iszero).(zitCumulative) # zitMask of firing neurons = 1, non-firing = 0
# projection = ones(i1,i2,i3)
# zitMask = zitMask .* projection # (row, col, n)
# totalNewConn = sum(isequal.(wRec, -1.0), dims=(1,2)) # count new conn mark (-1.0), (1, 1, n)
# println("neuroplasticity, from $(synapseConnectionNumber*size(totalNewConn, 3)) conn, $(sum(totalNewConn)) are replaced")
# # clear -1.0 marker
# GeneralUtils.replaceElements!(wRec, -1.0, synapseReconnectDelayCounter, -0.99)
# GeneralUtils.replaceElements!(wRec, -1.0, 0.0) # -1.0 marker is no longer required
# for i in 1:i3
# if neuronInactivityCounter[1:1:i][1] < -10000 # neuron die i.e. reset all weight
# println("neuron die")
# neuronInactivityCounter[:,:,i] .= 0 # reset
# w = random_wRec(i1,i2,1,synapseConnectionNumber)
# wRec[:,:,i] .= w
# a = similar(w) .= -0.99 # synapseConnectionNumber of this neuron
# mask = (!iszero).(w)
# GeneralUtils.replaceElements!(mask, 1, a, 0)
# synapseReconnectDelayCounter[:,:,i] = a
# else
# remaining = 0
# if subToFireNeuron_current[1,1,i] < subToFireNeuron_toBe
# toAddConn = subToFireNeuron_toBe - subToFireNeuron_current[1,1,i]
# totalNewConn[1,1,i] = totalNewConn[1,1,i] - toAddConn
# # add new conn to firing neurons pool
# remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
# @view(wRec[:,:,i]),
# @view(synapseReconnectDelayCounter[:,:,i]),
# toAddConn)
# totalNewConn[1,1,i] += remaining
# end
# # add new conn to non-firing neurons pool
# remaining = addNewSynapticConn!(zitMask[:,:,i], 0,
# @view(wRec[:,:,i]),
# @view(synapseReconnectDelayCounter[:,:,i]),
# totalNewConn[1,1,i])
# if remaining > 0 # final get-all round if somehow non-firing pool has not enough slot
# remaining = addNewSynapticConn!(zitMask[:,:,i], 1,
# @view(wRec[:,:,i]),
# @view(synapseReconnectDelayCounter[:,:,i]),
# remaining)
# end
# end
# end
# # error("DEBUG -> neuroplasticity $(Dates.now())")
# return wRec
# end
# learningLiquidity(x) = -0.0001x + 1 # -10000 to +10000; f(x) = -5e-05x+0.5
# Map a counter x to a learning-liquidity factor in [0, 1]:
# linear ramp -5e-05*x + 0.5 over [-10000, 10000], saturated at 1.0 below and
# 0.0 above that range (identical to the former three-branch version,
# including both boundary points).
learningLiquidity(x) = clamp(-5e-05x + 0.5, 0.0, 1.0)
end # module