From 9c988583aa75dec356df80310e1347245d8a4407 Mon Sep 17 00:00:00 2001 From: ton Date: Sat, 26 Aug 2023 07:11:27 +0700 Subject: [PATCH] clear marker --- src/forward.jl | 8 +-- src/learn.jl | 115 ++++++++++++++++++++++++----------------- src/snnUtil.jl | 136 +++++++++++++++++++++++++++++++++++++++++++------ src/type.jl | 15 +++--- 4 files changed, 199 insertions(+), 75 deletions(-) diff --git a/src/forward.jl b/src/forward.jl index ef7f79d..36e441b 100644 --- a/src/forward.jl +++ b/src/forward.jl @@ -18,7 +18,7 @@ function (kfn::kfn_1)(input::AbstractArray) # what to do at the start of learning round if view(kfn.learningStage, 1)[1] == 1 # reset learning params - kfn.zit_cumulative .= 0 + kfn.zitCumulative .= 0 kfn.lif_vt .= 0 kfn.lif_wRecChange .= 0 @@ -118,7 +118,7 @@ function (kfn::kfn_1)(input::AbstractArray) reshape(kfn.lif_zt, (size(input, 1), :, 1, size(input, 3))), reshape(kfn.alif_zt, (size(input, 1), :, 1, size(input, 3))), dims=2) kfn.zit .= reshape(_zit, (size(input, 1), :, size(input, 3))) - kfn.zit_cumulative .+= kfn.zit + kfn.zitCumulative .+= kfn.zit # project 3D kfn zit into 4D on zit i1, i2, i3, i4 = size(kfn.on_zit) @@ -291,7 +291,7 @@ function lifForward( zit, # count synaptic inactivity if !iszero(wRec[i1,i2,i3,i4]) # check if this is wRec subscription if !iszero(zit[i1,i2,i3,i4]) # synapse is active, reset counter - synapticInactivityCounter[i1,i2,i3,i4] = 10000 + synapticInactivityCounter[i1,i2,i3,i4] += 1 else # synapse is inactive, counting synapticInactivityCounter[i1,i2,i3,i4] -= 1 end @@ -478,7 +478,7 @@ function alifForward( zit, # count synaptic inactivity if !iszero(wRec[i1,i2,i3,i4]) # check if this is wRec subscription if !iszero(zit[i1,i2,i3,i4]) # synapse is active, reset counter - synapticInactivityCounter[i1,i2,i3,i4] = 10000 + synapticInactivityCounter[i1,i2,i3,i4] += 1 else # synapse is inactive, counting synapticInactivityCounter[i1,i2,i3,i4] -= 1 end diff --git a/src/learn.jl b/src/learn.jl index ad34b6e..1ab7aa2 100644 --- a/src/learn.jl +++ b/src/learn.jl @@ -270,8 +270,9 @@ function learn!(kfn::kfn_1, device=cpu) kfn.lif_wRecChange, kfn.lif_arrayProjection4d, kfn.lif_neuronInactivityCounter, + kfn.lif_synapticInactivityCounter, kfn.lif_synapticConnectionNumber, - kfn.zit_cumulative, + kfn.zitCumulative, device) # alif learn @@ -279,8 +280,9 @@ function learn!(kfn::kfn_1, device=cpu) kfn.alif_wRecChange, kfn.alif_arrayProjection4d, kfn.alif_neuronInactivityCounter, + kfn.lif_synapticInactivityCounter, kfn.alif_synapticConnectionNumber, - kfn.zit_cumulative, + kfn.zitCumulative, device) # on learn @@ -298,31 +300,38 @@ end function lifLearn!(wRec, wRecChange, arrayProjection4d, - inactivityCounter, + neuronInactivityCounter, + synapticInactivityCounter, synapticConnectionNumber, - zit_cumulative, + zitCumulative, device) # merge learning weight with average learning weight of all batch wRec .+= (sum(wRecChange, dims=4) ./ (size(wRec, 4))) .* arrayProjection4d wRec_cpu = wRec |> cpu wRec_cpu = wRec_cpu[:,:,:,1] # since every batch has the same neuron wRec, (row, col, n) - inactivityCounter_cpu = inactivityCounter |> cpu - inactivityCounter_cpu = inactivityCounter_cpu[:,:,:,1] # (row, col, n) - zit_cumulative_cpu = zit_cumulative |> cpu - zit_cumulative_cpu = zit_cumulative_cpu[:,:,1] # (row, col) + neuronInactivityCounter_cpu = neuronInactivityCounter |> cpu + neuronInactivityCounter_cpu = neuronInactivityCounter_cpu[:,:,:,1] # (row, col, n) + synapticInactivityCounter_cpu = synapticInactivityCounter |> cpu + synapticInactivityCounter_cpu = 
synapticInactivityCounter_cpu[:,:,:,1] + zitCumulative_cpu = zitCumulative |> cpu + zitCumulative_cpu = zitCumulative_cpu[:,:,1] # (row, col) # weak / negative synaptic connection will get randomed in neuroplasticity() wRec_cpu = GeneralUtils.replaceBetween.(wRec_cpu, 0.0, 0.1, -1.0) # mark with -1.0 # synaptic connection that has no inactivity will get randomed in neuroplasticity() - GeneralUtils.replace_elements!(inactivityCounter_cpu, 0.0, wRec_cpu, -1.0) - # reset lif_inactivity elements to 10000 - GeneralUtils.replace_elements!(inactivityCounter_cpu, 0.0, -9.0) # -9.0 is base value + GeneralUtils.replace_elements!(neuronInactivityCounter_cpu, 0.0, wRec_cpu, -1.0) + # reset lif_inactivity elements to -9 + GeneralUtils.replace_elements!(neuronInactivityCounter_cpu, 0.0, -9.0) # -9.0 is base value + #WORKING neuroplasticity - wRec_cpu = neuroplasticity(synapticConnectionNumber, zit_cumulative_cpu, wRec_cpu, - inactivityCounter_cpu) + wRec_cpu = neuroplasticity(synapticConnectionNumber, + zitCumulative_cpu, + wRec_cpu, + neuronInactivityCounter_cpu, + synapticInactivityCounter_cpu) error("DEBUG -> lifLearn! $(Dates.now())") # #TODO send to device with correct dimension # wRec = wRec |> device @@ -333,9 +342,10 @@ end function alifLearn!(wRec, wRecChange, arrayProjection4d, - inactivityCounter, + neuronInactivityCounter, + synapticInactivityCounter, synapticConnectionNumber, - zit_cumulative, + zitCumulative, device) # merge learning weight with average learning weight wRec .+= (sum(wRecChange, dims=4) ./ (size(wRec, 4))) .* arrayProjection4d @@ -365,9 +375,10 @@ function onLearn!(wOut, end function neuroplasticity(synapticConnectionNumber, - zit_cumulative, # (row, col) + zitCumulative, # (row, col) wRec, # (row, col, n) - inactivityCounter_cpu) # (row, col, n) + neuronInactivityCounter, #WORKING neuron die i.e. 
reset all weight + synapticInactivityCounter) # (row, col, n) i1,i2,i3 = size(wRec) @@ -376,42 +387,52 @@ function neuroplasticity(synapticConnectionNumber, subToFireNeuron_toBe = Int(floor(0.7 * synapticConnectionNumber)) subToNonFiringNeuron_toBe = synapticConnectionNumber - subToFireNeuron_toBe - #WORKING for each neuron, count how many synap already subscribed to firing-neurons - subToFireNeuron_current = sum((!iszero).(zit_cumulative .* wRec), dims=(1,2)) # (1, 1, n) - subToNonFiringNeuron_current = synapticConnectionNumber .- subToFireNeuron_current # (1, 1, n) - mask = (!iszero).(zit_cumulative) # mask of firing neurons = 1, non-firing = 0 + # for each neuron, count how many synap already subscribed to firing-neurons + zw = zitCumulative .* wRec + subToFireNeuron_current = sum(GeneralUtils.isBetween.(zw, 0.0, 100.0), dims=(1,2)) # (1, 1, n) + zitMask = (!iszero).(zitCumulative) # zitMask of firing neurons = 1, non-firing = 0 projection = ones(i1,i2,i3) - mask = mask .* projection # (row, col, n) + zitMask = zitMask .* projection # (row, col, n) totalNewConn = sum(isequal.(wRec, -1.0), dims=(1,2)) # count new conn mark (-1.0), (1, 1, n) - println("mask ", size(mask)) - println("wRec ", size(wRec)) - println("inactivityCounter_cpu ", size(inactivityCounter_cpu)) - println("totalNeurons ", totalNewConn, size(totalNewConn)) - error("DEBUG -> neuroplasticity $(Dates.now())") + + #WORKING clear -1.0 marker + GeneralUtils.replace_elements!(wRec, -1.0, synapticInactivityCounter, -9.0) + GeneralUtils.replace_elements!(wRec, -1.0, 0.0) # -1.0 marker is no longer required + + println("/////////") + println("wRec 1 ", wRec[:,:,1]) + println("synapticInactivityCounter 1 ", synapticInactivityCounter[:,:,1]) + for i in 1:i3 - - - # add new conn to firing neurons pool - remaining = GeneralUtils.replace_elements(mask[:,:,i], - 1, - wRecmask[:,:,i], - inactivityCounter_cpumask[:,:,i], - totalNewConn[:,:,i]) + remaining = 0 + if subToFireNeuron_current[1,1,i] < subToFireNeuron_toBe + toAddConn = subToFireNeuron_toBe - subToFireNeuron_current[1,1,i] + totalNewConn[1,1,i] = totalNewConn[1,1,i] - toAddConn + # add new conn to firing neurons pool + remaining = addNewSynapticConn!(zitMask[:,:,i], 1, + @view(wRec[:,:,i]), + @view(synapticInactivityCounter[:,:,i]), + toAddConn) + totalNewConn[1,1,i] += remaining + end - #TODO add new conn to non-firing neurons pool - + # add new conn to non-firing neurons pool + remaining = addNewSynapticConn!(zitMask[:,:,i], 0, + @view(wRec[:,:,i]), + @view(synapticInactivityCounter[:,:,i]), + totalNewConn[1,1,i]) + if remaining > 0 # final get-all round if somehow non-firing pool has not enough slot + remaining = addNewSynapticConn!(zitMask[:,:,i], 1, + @view(wRec[:,:,i]), + @view(synapticInactivityCounter[:,:,i]), + remaining) + end end + println("==========") + println("wRec 2 ", wRec[:,:,1]) + println("synapticInactivityCounter 2 ", synapticInactivityCounter[:,:,1]) - - - - - - newFiringConn = subToFireNeuron_toBe - subToFireNeuron_current - newFiringConn = newFiringConn > 0 ? newFiringConn : 0 - - newNonFiringConn = subToNonFiringNeuron_toBe - subToNonFiringNeuron_current - + # error("DEBUG -> neuroplasticity $(Dates.now())") return wRec end diff --git a/src/snnUtil.jl b/src/snnUtil.jl index deabccb..ca7c024 100644 --- a/src/snnUtil.jl +++ b/src/snnUtil.jl @@ -1,8 +1,8 @@ module snnUtil -export refractoryStatus! +export refractoryStatus!, addNewSynapticConn! 
-# using
+using Random
 
 
 #------------------------------------------------------------------------------------------------100
 
 
@@ -21,28 +21,132 @@ function refractoryStatus!(refractoryCounter, refractoryActive, refractoryInacti
 	end
 end
 
-function frobenius_distance(A, B)
-    # Check if the matrices have the same size
-    if size(A) != size(B)
-        error("The matrices must have the same size")
+# function frobenius_distance(A, B)
+#     # Check if the matrices have the same size
+#     if size(A) != size(B)
+#         error("The matrices must have the same size")
+#     end
+#     # Initialize the distance to zero
+#     distance = 0.0
+#     # Loop over the elements of the matrices and add the squared differences
+#     for i in 1:size(A, 1)
+#         for j in 1:size(A, 2)
+#             distance += (A[i, j] - B[i, j])^2
+#         end
+#     end
+#     # Return the square root of the distance
+#     return sqrt(distance)
+# end
+
+function addNewSynapticConn!(mask::AbstractArray{<:Any}, x::Number, wRec::AbstractArray{<:Any},
+                             counter::AbstractArray{<:Any}, n=0;
+                             rng::AbstractRNG=MersenneTwister(1234))
+    # println("mask ", mask, size(mask))
+    # println("")
+    # println("x ", x, size(x))
+    # println("")
+    # println("wRec ", wRec, size(wRec))
+    # println("")
+    # println("counter ", counter, size(counter))
+    # println("")
+    # println("n ", n, size(n))
+    # println("")
+
+    total_x_toBeReplaced = sum(isequal.(mask, x))
+    remaining = 0
+    if n == 0 || n > total_x_toBeReplaced
+        remaining = n - total_x_toBeReplaced
+        n = total_x_toBeReplaced
     end
-    # Initialize the distance to zero
-    distance = 0.0
-    # Loop over the elements of the matrices and add the squared differences
-    for i in 1:size(A, 1)
-        for j in 1:size(A, 2)
-            distance += (A[i, j] - B[i, j])^2
+
+    # check if mask and wRec have the same size
+    if size(mask) != size(wRec)
+        error("mask and wRec must have the same size")
+    end
+    # get the indices of elements in mask that equal x
+    indices = findall(el -> el == x, mask)
+    alreadySub = findall(w -> w != 0, wRec) # get already subscribed conn
+    setdiff!(indices, alreadySub) # remove already sub conn from pool
+
+    # shuffle the indices using the rng function
+    shuffle!(rng, indices)
+    # select the first n indices (pool may have shrunk after removing subscribed conn)
+    selected = indices[1:min(n, length(indices))]
+    # replace the elements in wRec at the selected positions with a small starting weight
+    for i in selected
+        wRec[i] = 0.1 #rand(0.1:0.01:0.3)
+        if counter !== nothing
+            counter[i] = 0 # reset
         end
     end
-    # Return the square root of the distance
-    return sqrt(distance)
+    # println("==================")
+    # println("mask ", mask, size(mask))
+    # println("")
+    # println("x ", x, size(x))
+    # println("")
+    # println("wRec ", wRec, size(wRec))
+    # println("")
+    # println("counter ", counter, size(counter))
+    # println("")
+    # println("n ", n, size(n))
+    # println("")
+    # error("DEBUG addNewSynapticConn!")
+    return remaining
 end
 
+# function addNewSynapticConn!(mask::AbstractArray{<:Any}, x::Number, A::AbstractArray{<:Any},
+#                              A2::AbstractArray{<:Any}, n=0;
+#                              rng::AbstractRNG=MersenneTwister(1234))
+#     # println("mask ", mask, size(mask))
+#     # println("")
+#     # println("x ", x, size(x))
+#     # println("")
+#     # println("A ", A, size(A))
+#     # println("")
+#     # println("A2 ", A2, size(A2))
+#     # println("")
+#     # println("n ", n, size(n))
+#     # println("")
+#     total_x_tobeReplced = sum(isequal.(mask, x))
+#     remaining = 0
+#     if n == 0 || n > total_x_tobeReplced
+#         remaining = n - total_x_tobeReplced
+#         n = total_x_tobeReplced
+#     end
 
-
-
+#     # check if mask and A have the same size
+#     if size(mask) != size(A)
+#         error("mask and A must have the same size")
+#     end
+#     # get the indices of elements in 
mask that equal x +# indices = findall(x -> x == x, mask) +# # shuffle the indices using the rng function +# shuffle!(rng, indices) +# # select the first n indices +# selected = indices[1:n] +# # replace the elements in A at the selected positions with a +# for i in selected +# A[i] = rand(0.1:0.01:0.3) +# if A2 !== nothing +# A2[i] = 10000 +# end +# end +# # println("==================") +# # println("mask ", mask, size(mask)) +# # println("") +# # println("x ", x, size(x)) +# # println("") +# # println("A ", A, size(A)) +# # println("") +# # println("A2 ", A2, size(A2)) +# # println("") +# # println("n ", n, size(n)) +# # println("") +# # error("DEBUG addNewSynapticConn!") +# return remaining +# end diff --git a/src/type.jl b/src/type.jl index 7b203ea..d6bea19 100644 --- a/src/type.jl +++ b/src/type.jl @@ -23,7 +23,7 @@ Base.@kwdef mutable struct kfn_1 <: knowledgeFn learningStage::Union{AbstractArray, Nothing} = nothing # 0 inference, 1 start, 2 during, 3 end learning inputSize::Union{AbstractArray, Nothing} = nothing zit::Union{AbstractArray, Nothing} = nothing # 3D activation matrix - zit_cumulative::Union{AbstractArray, Nothing} = nothing + zitCumulative::Union{AbstractArray, Nothing} = nothing exInType::Union{AbstractArray, Nothing} = nothing modelError::Union{AbstractArray, Nothing} = nothing # store RSNN error outputError::Union{AbstractArray, Nothing} = nothing # store output neurons error @@ -185,7 +185,7 @@ function kfn_1(params::Dict; device=cpu) # activation matrix kfn.zit = zeros(row, col, batch) |> device - kfn.zit_cumulative = (similar(kfn.zit) .= 0) + kfn.zitCumulative = (similar(kfn.zit) .= 0) kfn.modelError = zeros(1) |> device # ---------------------------------------------------------------------------- # @@ -237,7 +237,7 @@ function kfn_1(params::Dict; device=cpu) kfn.lif_neuronInactivityCounter = (similar(kfn.lif_wRec) .= 10000) kfn.lif_synapticInactivityCounter = Array(similar(kfn.lif_wRec) .= -9) # -9 for non-sub conn mask = Array((!iszero).(kfn.lif_wRec)) - GeneralUtils.replace_elements!(mask, 1, kfn.lif_synapticInactivityCounter, 10000) + GeneralUtils.replace_elements!(mask, 1, kfn.lif_synapticInactivityCounter, 0) # initial value subscribed conn kfn.lif_synapticInactivityCounter = kfn.lif_synapticInactivityCounter |> device kfn.lif_arrayProjection4d = (similar(kfn.lif_wRec) .= 1) @@ -296,7 +296,7 @@ function kfn_1(params::Dict; device=cpu) kfn.alif_neuronInactivityCounter = (similar(kfn.alif_wRec) .= 10000) kfn.alif_synapticInactivityCounter = Array(similar(kfn.alif_wRec) .= -9) # -9 for non-sub conn mask = Array((!iszero).(kfn.alif_wRec)) - GeneralUtils.replace_elements!(mask, 1, kfn.alif_synapticInactivityCounter, 10000) + GeneralUtils.replace_elements!(mask, 1, kfn.alif_synapticInactivityCounter, 0) # initial value subscribed conn kfn.alif_synapticInactivityCounter = kfn.alif_synapticInactivityCounter |> device kfn.alif_arrayProjection4d = (similar(kfn.alif_wRec) .= 1) @@ -333,7 +333,6 @@ function kfn_1(params::Dict; device=cpu) synapticConnection = Int(floor(subable * synapticConnectionPercent/100)) for slice in eachslice(w, dims=3) # each slice is a neuron startInd = row*col - subable + 1 # e.g. 
100(row*col) - 50(subable) = 50 -> startInd = 51
-
 	# pool must contain only lif, alif neurons
 	pool = shuffle!([startInd:row*col...])[1:synapticConnection]
 	for i in pool
@@ -342,9 +341,9 @@ function kfn_1(params::Dict; device=cpu)
 	end
 	end
 
-	# # 10% of neuron connection should be enough to start to make neuron fires
-	# should_be_avg_weight = 1 / (0.2 * n)
-	# w = w .* (should_be_avg_weight / maximum(w)) # adjust overall weight
+	# 10% of the neuron connections should be enough to make neurons start firing
+	should_be_avg_weight = 1 / (0.1 * n)
+	w = w .* (should_be_avg_weight / maximum(w)) # adjust overall weight
 
 	# project 3D w into 4D kfn.lif_wOut (row, col, n, batch)
 	kfn.on_wOut = reshape(w, (row, col, n, 1)) .* ones(row, col, n, batch) |> device
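
For reference, the sketch below illustrates the rewiring behaviour that neuroplasticity() and addNewSynapticConn!() implement in this patch: candidate positions are collected from the requested firing (mask == 1) or non-firing (mask == 0) group, positions that already hold a connection are dropped, the pool is shuffled, and the selected slots receive a small starting weight while their synaptic inactivity counter is reset to 0. The helper name add_new_conn!, the min clamping, and the 3x3 toy arrays are illustrative assumptions, not code from the repository.

using Random

# Standalone toy version of the selection step (assumed simplification of addNewSynapticConn!).
function add_new_conn!(mask, x, w, counter, n; rng=MersenneTwister(1234))
    pool = findall(el -> el == x, mask)       # candidate positions in the requested group
    pool = setdiff(pool, findall(!iszero, w)) # drop positions that already hold a connection
    shuffle!(rng, pool)
    k = min(n, length(pool))                  # cannot place more connections than free slots
    for i in pool[1:k]
        w[i] = 0.1                            # small starting weight for the new connection
        counter[i] = 0                        # reset the inactivity counter for this synapse
    end
    return n - k                              # requested connections that could not be placed
end

mask    = [1 0 1; 0 1 0; 1 1 0]                     # 1 = presynaptic neuron fired this round
w       = [0.0 0.0 0.2; 0.0 0.0 0.0; 0.3 0.0 0.0]   # existing recurrent weights
counter = fill(-9, size(w))                         # -9 marks non-subscribed slots, as in type.jl

left = add_new_conn!(mask, 1, w, counter, 3)          # fill the firing pool first
left = add_new_conn!(mask, 0, w, counter, left + 1)   # spill the remainder into the non-firing pool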