version 0.0.2

This commit is contained in:
ton
2023-08-06 08:15:44 +07:00
parent 56ec3757c9
commit 302f506b5b
11 changed files with 2755 additions and 23 deletions

View File

@@ -25,9 +25,8 @@ using .interface
#------------------------------------------------------------------------------------------------100
""" version 0.0.1
""" version 0.0.2
Todo:
[*1] knowledgeFn in GPU format
[] use partial error update for computeNeuron
[] use integrate_neuron_params synapticConnectionPercent LESS THAN 100%
[2] implement dormant connection and pruning mechanism. the longer the training the longer
@@ -37,8 +36,8 @@ using .interface
[] Liquid time constant. training should include adjusting α, the neuron membrane potential decay factor,
which is defined by the neuron.tau_m formula in type.jl
Change from version:
-
Change from version: 0.0.1
- knowledgeFn in GPU format
All features

View File

@@ -18,7 +18,19 @@ function (kfn::kfn_1)(input::AbstractArray)
#TODO time step forward
if view(kfn.learningStage, 1)[1] == 1
# reset learning params
# kfn.learningStage = [2]
kfn.lif_vt .= 0
kfn.lif_wRecChange .= 0
kfn.lif_epsilonRec .= 0
kfn.alif_vt .= 0
kfn.alif_epsilonRec .= 0
kfn.alif_wRecChange .= 0
kfn.on_vt .= 0
kfn.on_epsilonRec .= 0
kfn.on_wOutChange .= 0
kfn.learningStage = [2]
end
# update activation matrix with "lif_zt1" and "alif_zt1" by concatenating
@@ -27,12 +39,6 @@ function (kfn::kfn_1)(input::AbstractArray)
reshape(kfn.lif_zt, (size(input, 1), :, 1, size(input, 3))),
reshape(kfn.alif_zt, (size(input, 1), :, 1, size(input, 3))), dims=2)
kfn.zit .= reshape(_zit, (size(input, 1), :, size(input, 3)))
# pass input_data into input neuron.
# GeneralUtils.cartesianAssign!(kfn.zit, input)
# kfn.zit = kfn.zit |> device
# input = input |> device
# project 3D kfn zit into 4D lif zit
i1, i2, i3, i4 = size(kfn.lif_zit)

View File

@@ -42,8 +42,6 @@ function compute_paramsChange!(kfn::kfn_1, modelError, outputError)
# error("DEBUG -> kfn compute_paramsChange! $(Dates.now())")
end
function lifComputeParamsChange!( phi::CuArray,
epsilonRec::CuArray,
eta::CuArray,
@@ -197,15 +195,18 @@ end
function learn!(kfn::kfn_1)
# lif learn
lifLearn!(kfn.lif_wRec,
kfn.lif_wRecChange)
kfn.lif_wRecChange,
kfn.lif_arrayProjection4d)
# alif learn
alifLearn!(kfn.alif_wRec,
kfn.alif_wRecChange)
kfn.alif_wRecChange,
kfn.alif_arrayProjection4d)
# on learn
onLearn!(kfn.on_wOut,
kfn.on_wOutChange)
kfn.on_wOutChange,
kfn.on_arrayProjection4d)
# wOut decay
kfn.on_wOut .*= 0.0001
@@ -218,9 +219,11 @@ function learn!(kfn::kfn_1)
end
function lifLearn!(wRec,
wRecChange)
# merge learning weight
wRec .+= wRecChange
wRecChange,
arrayProjection4d)
# merge learning weight with average learning weight
wRec .+= (sum(wRecChange) ./ (size(wRec, 4))) .* arrayProjection4d
#TODO synaptic strength
@@ -229,9 +232,10 @@ function lifLearn!(wRec,
end
function alifLearn!(wRec,
wRecChange)
wRecChange,
arrayProjection4d)
# merge learning weight
wRec .+= wRecChange
wRec .+= (sum(wRecChange) ./ (size(wRec, 4))) .* arrayProjection4d
#TODO synaptic strength
@@ -240,9 +244,10 @@ function alifLearn!(wRec,
end
function onLearn!(wOut,
wOutChange)
wOutChange,
arrayProjection4d)
# merge learning weight
wOut .+= wOutChange
wOut .+= (sum(wOutChange) ./ (size(wOut, 4))) .* arrayProjection4d
#TODO synaptic strength