update version

This commit is contained in:
2023-05-30 09:05:23 +07:00
parent 8f9b2dca1d
commit ddb58d2fbd
37 changed files with 6805 additions and 111 deletions

View File

@@ -32,7 +32,7 @@ using .learn
# using .interface
#------------------------------------------------------------------------------------------------100
""" version 0.0.3
""" version 0.0.4
Todo:
[4] implement dormant connection
[] using RL to control learning signal
@@ -40,16 +40,19 @@ using .learn
[5] training should include adjusting α, neuron membrane potential decay factor
which defined by neuron.tau_m formula in type.jl
Change from version: 0.0.2
- new learning method
- use Flux.logitcrossentropy for overall error
- remove ΔwRecChange that apply immediately during online learning
Change from version: 0.0.3
All features
- ΔwRecChange during input signal ingestion will be merged at the end of learning
- all RSNN and output neuron learning associate.
- synapticStrength apply at the end of learning
- collect ΔwRecChange during online learning (0-784th) and merge with wRec at
  the end of learning (1000th).
- compute model error at the end of learning. The model error is multiplied by a
  constant of 5 for a higher learning impact than the error during online learning.
All features
- multidispatch + for loop as main compute method
- hard connection constrain yes
- normalize output yes
@@ -80,18 +83,22 @@ using .learn
- wRec should not normalized whole. it should be local 5 conn normalized.
- neuroplasticity() i.e. change connection
- add multi threads
- during 0 training if 1-9 output neuron fires, adjust weight only those neurons
- add maximum weight cap of each connection
Removed features
- ΔwRecChange that apply immediately during online learning
- error by percent of vth-v_t1
- Δweight * connection strength
- a weaker connection should be harder to strengthen: it requires a lot of
  repeated activation to get stronger, while a strong connection requires a lot of
  inactivation to get weaker. The concept is that a strong connection will lock in the
  correct neural pathway through repeated use of the right connection, i.e. keep training
  on the correct answer -> strengthen the right neural pathway (connections) ->
  this correct neural pathway resists change.
  Unused connections should disappear (forgetting).
Removed features
- Δweight * connection strength
Unused connections should disappear (forgetting).
- during 0 training if 1-9 output neuron fires, adjust weight only those neurons
- use Flux.logitcrossentropy for overall error
"""

View File

@@ -73,7 +73,7 @@ function (kfn::kfn_1)(m::model, input_data::AbstractVector)
kfn.neuronsArray[i].z_t1 = data
end
kfn.firedNeurons_t0 = [n.z_t for n in kfn.neuronsArray] #TODO check if it is used?
kfn.firedNeurons_t0 = [n.z_t for n in kfn.neuronsArray]
# Threads.@threads for n in kfn.neuronsArray
for n in kfn.neuronsArray
@@ -93,7 +93,7 @@ function (kfn::kfn_1)(m::model, input_data::AbstractVector)
outputNeuron_v_t1 = [n.v_t1 for n in kfn.outputNeuronsArray]
return out::Array{Bool}, outputNeuron_v_t1::Array{Float64}, sum(kfn.firedNeurons_t1),
kfn.exSignalSum, kfn.inSignalsum
[i.ExInSignalSum for i in kfn.outputNeuronsArray]
end
#------------------------------------------------------------------------------------------------100
@@ -218,15 +218,15 @@ function (n::linearNeuron)(kfn::T) where T<:knowledgeFn
n.vError = n.v_t1 # store voltage that will be used to calculate error later
else
recSignal = n.wRec .* n.z_i_t
if n.id == 1 #FIXME debugging output neuron dead
for i in recSignal
if i > 0
kfn.exSignalSum += i
elseif i < 0
kfn.inSignalsum += i
else
end
end
n.ExInSignalSum = 0.0 #CHANGE for ploting
for i in recSignal
# if i > 0
# kfn.exSignalSum += i
# elseif i < 0
# kfn.inSignalsum += i
# else
# end
n.ExInSignalSum += i
end
n.recSignal = sum(recSignal) # signal from other neuron that this neuron subscribed
n.alpha_v_t = n.alpha * n.v_t

View File

@@ -8,6 +8,73 @@ export learn!, compute_wRecChange!, computeModelError
#------------------------------------------------------------------------------------------------100
"""
    computeModelError(modelRespond, correctAnswer; magnitude=1.0)

Return the per-output-neuron model error: the element-wise signed difference
`correctAnswer - modelRespond`, scaled by `magnitude`. The result is asserted
to be a `Vector{Float64}`.
"""
function computeModelError(modelRespond, correctAnswer; magnitude::Float64=1.0)
    # fused broadcast: one pass over the vectors, no intermediate temporaries
    modelError = @. (correctAnswer - modelRespond) * magnitude
    return modelError::Vector{Float64}
end
# Model-level entry point: forward the weight-change computation to the
# model's knowledge function registered under key :I.
compute_wRecChange!(m::model, error::Vector{Float64}, correctAnswer::AbstractVector) =
    compute_wRecChange!(m.knowledgeFn[:I], error, correctAnswer)
"""
    compute_wRecChange!(kfn::kfn_1, errors::Vector{Float64}, correctAnswer::AbstractVector)

Accumulate recurrent-weight changes for all RSNN neurons and the corresponding
output neuron from the per-output-neuron `errors`. Outputs with zero error are
skipped. For a wrong output, the error is first scaled by the distance between
the output neuron's firing threshold and its stored voltage
(`abs(v_th - vError)`).

`correctAnswer` is kept for interface compatibility: the former high-priority
(`correctAnswer[i] == 1`) and low-priority branches computed the identical
scaling, so it is currently unused.
"""
function compute_wRecChange!(kfn::kfn_1, errors::Vector{Float64}, correctAnswer::AbstractVector)
    for (i, err) in enumerate(errors)
        err == 0 && continue # output i is correct: nothing to adjust
        # scale the error by how far the output neuron's membrane voltage was
        # from its firing threshold (both priority branches were identical)
        err *= abs(kfn.outputNeuronsArray[i].v_th - kfn.outputNeuronsArray[i].vError)
        # NOTE(review): an earlier comment here said multithreading "is not
        # atomic and causing error", yet @threads is the active path — confirm
        # compute_wRecChange! mutates only per-neuron state before trusting it.
        Threads.@threads for n in kfn.neuronsArray
            compute_wRecChange!(n, err)
        end
        compute_wRecChange!(kfn.outputNeuronsArray[i], err)
    end
end
# Passthrough (input) neurons carry no learnable recurrent weights, so there
# is no weight change to accumulate — intentional no-op for dispatch.
function compute_wRecChange!(n::passthroughNeuron, error::Float64)
    # skip
end
"""
    compute_wRecChange!(n::lifNeuron, error::Float64)

Accumulate the recurrent weight change for a LIF neuron: the eligibility trace
(pseudo-derivative `phi` times the input trace `epsilonRec`) scaled by the
learning rate `eta` and the propagated `error`. Resets `epsilonRec` afterwards.
"""
function compute_wRecChange!(n::lifNeuron, error::Float64)
    # eligibility trace is stored on the neuron (side effect kept on purpose)
    n.eRec = n.phi * n.epsilonRec
    n.wRecChange .+= n.eta * error * n.eRec
    reset_epsilonRec!(n)
end
"""
    compute_wRecChange!(n::alifNeuron, error::Float64)

Accumulate the recurrent weight change for an adaptive LIF neuron. The
eligibility trace combines a membrane-voltage component with a
threshold-adaptation component weighted by `beta`; both input traces are
reset afterwards.
"""
function compute_wRecChange!(n::alifNeuron, error::Float64)
    n.eRec_v = n.phi * n.epsilonRec           # voltage trace component
    n.eRec_a = n.phi * n.beta * n.epsilonRecA # adaptation trace component
    n.eRec = n.eRec_v + n.eRec_a
    n.wRecChange .+= n.eta * error * n.eRec
    reset_epsilonRec!(n)
    reset_epsilonRecA!(n)
end
"""
    compute_wRecChange!(n::linearNeuron, error::Float64)

Accumulate the recurrent weight change for a linear output neuron — same
trace-times-error update as the LIF neuron. Resets `epsilonRec` afterwards.
"""
function compute_wRecChange!(n::linearNeuron, error::Float64)
    n.eRec = n.phi * n.epsilonRec # eligibility trace (stored as a side effect)
    n.wRecChange .+= n.eta * error * n.eRec
    reset_epsilonRec!(n)
end
# Model-level entry point: run learning on knowledge function :I.
learn!(m::model) = learn!(m.knowledgeFn[:I])
@@ -30,64 +97,13 @@ function learn!(kfn::kfn_1)
end
end
function computeModelError(modelRespond, correctAnswer; magnitude::Float64=1.0)
if correctAnswer === nothing
correctAnswer = BitArray(zeros(length(modelRespond)))
else
correctAnswer = Bool.(correctAnswer) # correct answer for kfn I
end
return Flux.logitcrossentropy(modelRespond, correctAnswer) .* magnitude
end
function compute_wRecChange!(m::model, error::Float64)
compute_wRecChange!(m.knowledgeFn[:I], error)
end
function compute_wRecChange!(kfn::kfn_1, error::Float64)
# compute kfn error for each neuron
Threads.@threads for n in kfn.neuronsArray
# for n in kfn.neuronsArray
compute_wRecChange!(n, error)
end
for n in kfn.outputNeuronsArray
compute_wRecChange!(n, error)
end
end
function compute_wRecChange!(n::passthroughNeuron, error::Float64)
# skip
end
function compute_wRecChange!(n::lifNeuron, error::Float64)
n.eRec = n.phi * n.epsilonRec
ΔwRecChange = -n.eta * error * n.eRec
n.wRecChange .+= ΔwRecChange
reset_epsilonRec!(n)
end
function compute_wRecChange!(n::alifNeuron, error::Float64)
n.eRec_v = n.phi * n.epsilonRec
n.eRec_a = -n.phi * n.beta * n.epsilonRecA
n.eRec = n.eRec_v + n.eRec_a
ΔwRecChange = -n.eta * error * n.eRec
n.wRecChange .+= ΔwRecChange
reset_epsilonRec!(n)
reset_epsilonRecA!(n)
end
function compute_wRecChange!(n::linearNeuron, error::Float64)
n.eRec = n.phi * n.epsilonRec
ΔwRecChange = -n.eta * error * n.eRec
n.wRecChange .+= ΔwRecChange
reset_epsilonRec!(n)
end
# Input neurons have no learnable recurrent weights — intentional no-op so the
# generic learn! loop can dispatch over every neuron uniformly.
function learn!(n::T, firedNeurons, nExInType) where T<:inputNeuron
    # skip
end
function learn!(n::T, firedNeurons, nExInType) where T<:computeNeuron
wSign_0 = sign.(n.wRec) # original sign
# n.wRecChange .*= (connStrengthAdjust.(n.synapticStrength))
n.wRec += n.wRecChange # merge wRecChange into wRec
reset_wRecChange!(n)
wSign_1 = sign.(n.wRec) # check for fliped sign, 1 indicates non-fliped sign
@@ -95,15 +111,15 @@ function learn!(n::T, firedNeurons, nExInType) where T<:computeNeuron
# normalize wRec peak to prevent input signal overwhelming neuron
normalizePeak!(n.wRec, n.wRecChange, 2)
# set weight that fliped sign to 0 for random new connection
# n.wRec .*= nonFlipedSign
n.wRec .*= nonFlipedSign
capMaxWeight!(n.wRec) # cap maximum weight
synapticConnStrength!(n)
synapticConnStrength!(n, "updown")
neuroplasticity!(n, firedNeurons, nExInType)
end
function learn!(n::T, firedNeurons, nExInType, totalInputPort) where T<:outputNeuron
wSign_0 = sign.(n.wRec) # original sign
# n.wRecChange .*= (connStrengthAdjust.(n.synapticStrength))
n.wRec += n.wRecChange
reset_wRecChange!(n)
wSign_1 = sign.(n.wRec) # check for fliped sign, 1 indicates non-fliped sign
@@ -111,10 +127,9 @@ function learn!(n::T, firedNeurons, nExInType, totalInputPort) where T<:outputNe
# normalize wRec peak to prevent input signal overwhelming neuron
normalizePeak!(n.wRec, n.wRecChange, 2)
# set weight that fliped sign to 0 for random new connection
# n.wRec .*= nonFlipedSign
n.wRec .*= nonFlipedSign
capMaxWeight!(n.wRec) # cap maximum weight
# synapticConnStrength!(n) #CHANGE
synapticConnStrength!(n, "updown")
neuroplasticity!(n,firedNeurons, nExInType, totalInputPort)
end

View File

@@ -281,13 +281,13 @@ function synapticConnStrength(currentStrength::Float64, updown::String)
if currentStrength > 4 # strong connection
updatedStrength = currentStrength + (Δstrength * 1.0)
else
updatedStrength = currentStrength + (Δstrength * 1.0)
updatedStrength = currentStrength + (Δstrength * 0.1)
end
elseif updown == "down"
if currentStrength > 4
updatedStrength = currentStrength - (Δstrength * 1.0)
updatedStrength = currentStrength - (Δstrength * 0.5)
else
updatedStrength = currentStrength - (Δstrength * 1.0)
updatedStrength = currentStrength - (Δstrength * 0.2)
end
else
error("undefined condition line $(@__LINE__)")
@@ -298,29 +298,44 @@ end
""" Compute all synaptic connection strength of a neuron. Also mark n.wRec to 0 if wRec goes
below lowerlimit.
"""
function synapticConnStrength!(n::Union{computeNeuron, outputNeuron})
for (i, connStrength) in enumerate(n.synapticStrength)
# check whether connStrength increase or decrease based on usage from n.epsilonRec
""" use n.z_i_t_commulative instead of the best choice, epsilonRec, here because ΔwRecChange
calculation in learn!() will reset epsilonRec to zeroes vector in case where
output neuron fires and trigger learn!() just before this synapticConnStrength
calculation.
Since n.z_i_t_commulative indicates whether a synaptic connection were used or not, it is
ok to use. n.z_i_t_commulative also span across a training sample without resetting.
"""
updown = n.z_i_t_commulative[i] == 0 ? "down" : "up"
updatedConnStrength = synapticConnStrength(connStrength, updown)
updatedConnStrength = GeneralUtils.limitvalue(updatedConnStrength,
n.synapticStrengthLimit.lowerlimit, n.synapticStrengthLimit.upperlimit)
# at lowerlimit, mark wRec at this position to 0. for new random synaptic conn
if updatedConnStrength == n.synapticStrengthLimit.lowerlimit[1]
n.wRec[i] = 0.0
function synapticConnStrength!(n::Union{computeNeuron, outputNeuron}, mode::String)
if mode == "updown"
for (i, connStrength) in enumerate(n.synapticStrength)
# check whether connStrength increase or decrease based on usage from n.epsilonRec
""" use n.z_i_t_commulative instead of the best choice, epsilonRec, here because ΔwRecChange
calculation in learn!() will reset epsilonRec to zeroes vector in case where
output neuron fires and trigger learn!() just before this synapticConnStrength
calculation.
Since n.z_i_t_commulative indicates whether a synaptic connection were used or not, it is
ok to use. n.z_i_t_commulative also span across a training sample without resetting.
"""
updown = n.z_i_t_commulative[i] == 0 ? "down" : "up"
updatedConnStrength = synapticConnStrength(connStrength, updown)
updatedConnStrength = GeneralUtils.limitvalue(updatedConnStrength,
n.synapticStrengthLimit.lowerlimit, n.synapticStrengthLimit.upperlimit)
# at lowerlimit, mark wRec at this position to 0. for new random synaptic conn
if updatedConnStrength == n.synapticStrengthLimit.lowerlimit[1]
n.wRec[i] = 0.0
end
n.synapticStrength[i] = updatedConnStrength
end
n.synapticStrength[i] = updatedConnStrength
elseif mode == "down"
for (i, connStrength) in enumerate(n.synapticStrength)
updatedConnStrength = synapticConnStrength(connStrength, "down")
updatedConnStrength = GeneralUtils.limitvalue(updatedConnStrength,
n.synapticStrengthLimit.lowerlimit, n.synapticStrengthLimit.upperlimit)
# at lowerlimit, mark wRec at this position to 0. for new random synaptic conn
if updatedConnStrength == n.synapticStrengthLimit.lowerlimit[1]
n.wRec[i] = 0.0
end
n.synapticStrength[i] = updatedConnStrength
end
else
error("undefined condition line $(@__LINE__)")
end
end
function synapticConnStrength!(n::inputNeuron) end
function synapticConnStrength!(n::inputNeuron, correctness::Bool) end
""" normalize a part of a vector centering at a vector's maximum value along with nearby value
within its radius. radius must be odd number.

View File

@@ -115,9 +115,6 @@ Base.@kwdef mutable struct kfn_1 <: knowledgeFn
nInhabitory::Array{Int64} = Int64[] # list of inhabitory neuron id
nExInType::Array{Int64} = Int64[] # list all neuron EX or IN
excitatoryPercent::Int64 = 60 # percentage of excitatory neuron, inhabitory percent will be 100-ExcitatoryPercent
exSignalSum = 0
inSignalsum = 0
end
#------------------------------------------------------------------------------------------------100
@@ -349,7 +346,7 @@ Base.@kwdef mutable struct lifNeuron <: computeNeuron
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
refractoryDuration::Int64 = 3 # neuron's refratory period in millisecond
refractoryCounter::Int64 = 0
tau_m::Float64 = 0.0 # τ_m, membrane time constant in millisecond
tau_m::Float64 = 50.0 # τ_m, membrane time constant in millisecond
eta::Float64 = 0.01 # η, learning rate
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
recSignal::Float64 = 0.0 # incoming recurrent signal
@@ -443,7 +440,7 @@ Base.@kwdef mutable struct alifNeuron <: computeNeuron
phi::Float64 = 0.0 # ϕ, psuedo derivative
refractoryDuration::Int64 = 3 # neuron's refractory period in millisecond
refractoryCounter::Int64 = 0
tau_m::Float64 = 0.0 # τ_m, membrane time constant in millisecond
tau_m::Float64 = 50.0 # τ_m, membrane time constant in millisecond
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
recSignal::Float64 = 0.0 # incoming recurrent signal
alpha_v_t::Float64 = 0.0 # alpha * v_t
@@ -456,7 +453,7 @@ Base.@kwdef mutable struct alifNeuron <: computeNeuron
firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
firingRate::Float64 = 0.0 # running average of firing rate, Hz
tau_a::Float64 = 0.0 # τ_a, adaption time constant in millisecond
tau_a::Float64 = 50.0 # τ_a, adaption time constant in millisecond
beta::Float64 = 0.15 # β, constant, value from paper
rho::Float64 = 0.0 # ρ, threshold adaptation decay factor
a::Float64 = 0.0 # threshold adaptation
@@ -547,13 +544,14 @@ Base.@kwdef mutable struct linearNeuron <: outputNeuron
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
refractoryDuration::Int64 = 3 # neuron's refratory period in millisecond
refractoryCounter::Int64 = 0
tau_out::Float64 = 0.0 # τ_out, membrane time constant in millisecond
tau_out::Float64 = 25.0 # τ_out, membrane time constant in millisecond
eta::Float64 = 0.01 # η, learning rate
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
recSignal::Float64 = 0.0 # incoming recurrent signal
alpha_v_t::Float64 = 0.0 # alpha * v_t
firingCounter::Int64 = 0 # store how many times neuron fires
ExInSignalSum::Float64 = 0.0
end
""" linear neuron outer constructor