refactoring

This commit is contained in:
2023-05-10 20:38:23 +07:00
commit 7c4a0dfa6f
15 changed files with 3195 additions and 0 deletions

236
src/forward.jl Normal file
View File

@@ -0,0 +1,236 @@
module forward
using Flux.Optimise: apply!
using Statistics, Flux, Random, LinearAlgebra
using GeneralUtils
using ..types, ..snn_utils
#------------------------------------------------------------------------------------------------100
""" Model forward()

Functor: advance the model clock by one tick and evaluate the model's
knowledge function on `input_data`, returning its raw response.
"""
function (m::model)(input_data::AbstractVector)
    # one forward call == one simulation timestep
    m.time_stamp += 1
    # dispatch to the :I knowledge function; a model currently holds only this
    # single KFN, so its raw output (and, per the original note, its error)
    # stands in for the whole model's response
    response = m.knowledgeFn[:I](m, input_data)
    return response
end
#------------------------------------------------------------------------------------------------100
""" knowledgeFn forward()

Functor: run one timestep of the spiking network owned by `kfn`.

Steps, in order: sync bookkeeping fields from the model, prepend random boolean
noise to `input_data`, tick every neuron (`timestep_forward!`), handle the
"start_learning" reset, inject the (noise-extended) input into the first input
neurons, fire all hidden neurons against the t-state snapshot, record the t+1
firing state, then fire the output neurons and collect their outputs.

Returns the vector of `out_t1` values of `kfn.output_neurons_array`.
"""
function (kfn::kfn_1)(m::model, input_data::AbstractVector)
    # mirror model-level bookkeeping into the KFN for this step
    kfn.time_stamp = m.time_stamp
    kfn.softreset = m.softreset
    kfn.learning_stage = m.learning_stage
    kfn.error = m.error
    # generate noise: one independent fair coin flip per input element
    noise = [GeneralUtils.randomChoiceWithProb([true, false],[0.5,0.5])
    for i in 1:length(input_data)]
    # noise = [rand(rng, Distributions.Binomial(1, 0.5)) for i in 1:10] # another option
    input_data = [noise; input_data] # noise start from neuron id 1
    # advance per-neuron time-step state before computing this step
    for n in kfn.neurons_array
        timestep_forward!(n)
    end
    for n in kfn.output_neurons_array
        timestep_forward!(n)
    end
    # NOTE(review): duplicate of the assignment a few lines above — confirm whether
    # timestep_forward! can change m.learning_stage; otherwise this is redundant
    kfn.learning_stage = m.learning_stage
    if kfn.learning_stage == "start_learning"
        # reset params here instead of at the end_learning so that neuron's parameter data
        # don't gets wiped and can be logged for visualization later
        for n in kfn.neurons_array
            # epsilon_rec need to be reset because it counting how many each synaptic fires and
            # use this info to calculate how much synaptic weight should be adjust
            reset_learning_params!(n)
        end
        # clear per-episode accumulators
        kfn.firing_neurons_list = Vector{Int64}()
        kfn.outputs = nothing
    end
    # pass input_data into input neuron.
    # number of data point equals to number of input neuron starting from id 1
    for (i, data) in enumerate(input_data)
        kfn.neurons_array[i].z_t1 = data
    end
    # snapshot firing state at time t so all neurons below read a consistent view
    kfn.snn_firing_state_t0 = [n.z_t for n in kfn.neurons_array] #TODO check if it is used?
    #CHANGE Threads.@threads for n in kfn.neurons_array
    for n in kfn.neurons_array
        n(kfn)
    end
    kfn.snn_firing_state_t1 = [n.z_t1 for n in kfn.neurons_array]
    # store id of neuron that fires this step (accumulated across steps)
    append!(kfn.firing_neurons_list, findall(kfn.snn_firing_state_t1))
    if kfn.learning_stage == "end_learning" # use for random new neuron connection
        kfn.firing_neurons_list |> unique!
    end
    # Threads.@threads for n in kfn.output_neurons_array
    for n in kfn.output_neurons_array
        n(kfn)
    end
    # collect the t+1 outputs of the readout layer
    out = [n.out_t1 for n in kfn.output_neurons_array]
    return out
end
#------------------------------------------------------------------------------------------------100
""" passthrough_neuron forward()

Functor: a no-op neuron that only syncs its timestamp with the owning
knowledge function. Presumably these are the input neurons whose firing state
(`z_t1`) is written directly by the KFN when input data is injected, so no
dynamics are computed here — TODO confirm against the KFN forward pass.
"""
function (n::passthrough_neuron)(kfn::knowledgeFn)
    n.time_stamp = kfn.time_stamp
    # n.global_tick = kfn.global_tick
end
#------------------------------------------------------------------------------------------------100
""" lif_neuron forward()

Functor: advance a leaky integrate-and-fire neuron by one timestep against the
time-t firing snapshot held by `kfn`. While refractory, integration is skipped
and the membrane potential decays exponentially; otherwise the potential is
updated from the weighted presynaptic spikes and a spike is emitted (with a
soft threshold-subtraction reset) when it crosses `v_th`.
"""
function (n::lif_neuron)(kfn::knowledgeFn)
    n.time_stamp = kfn.time_stamp
    # presynaptic firing at time t, signed by excitatory/inhibitory type
    n.z_i_t = kfn.snn_firing_state_t0[n.subscription_list]
    n.z_i_t .*= n.sub_ExIn_type
    if n.refractory_counter == 0
        # integrate: leaky decay of previous potential plus weighted spike input
        n.recurrent_signal = sum(n.w_rec .* n.z_i_t) # signal from subscribed neurons
        n.alpha_v_t = n.alpha * n.v_t
        n.v_t1 = n.alpha_v_t + n.recurrent_signal
        if n.v_t1 > n.v_th
            # threshold crossed: spike for exactly one timestep, then refractory
            n.z_t1 = true
            n.refractory_counter = n.refractory_duration
            n.firing_counter += 1
            n.v_t1 = n.v_t1 - n.v_th # soft reset by threshold subtraction
        else
            n.z_t1 = false
        end
        # pseudo-derivative for learning (there is a difference from alif formula)
        n.phi = (n.gamma_pd / n.v_th) * max(0, 1 - (n.v_t1 - n.v_th) / n.v_th)
    else
        # refractory: skip all integration; a spike lasts a single timestep,
        # so the firing flag is forced low for the whole refractory period
        n.refractory_counter -= 1
        n.z_t1 = false
        n.recurrent_signal *= 0.0
        # exponential decay of v_t1 since the last spike
        n.v_t1 = n.v_t * n.alpha^(n.time_stamp - n.last_firing_time) # or n.v_t1 = n.alpha * n.v_t
    end
end
#------------------------------------------------------------------------------------------------100
""" alif_neuron forward()

Functor: advance an adaptive leaky integrate-and-fire neuron by one timestep.
Like `lif_neuron`, but the effective threshold `av_th = v_th + beta * a` rises
with the adaptation trace `a`, which low-pass-filters the neuron's own recent
firing (`a ← rho*a + (1-rho)*z_t`). While refractory, integration is skipped,
the potential decays exponentially, and the pseudo-derivative is zeroed.
"""
function (n::alif_neuron)(kfn::knowledgeFn)
    n.time_stamp = kfn.time_stamp
    # presynaptic firing at time t, signed by excitatory/inhibitory type
    n.z_i_t = getindex(kfn.snn_firing_state_t0, n.subscription_list)
    n.z_i_t .*= n.sub_ExIn_type
    if n.refractory_counter != 0
        n.refractory_counter -= 1
        # neuron is in refractory state, skip all calculation
        n.z_t1 = false # used by timestep_forward() in kfn. Set to zero because neuron spike last only 1 timestep follow by a period of refractory.
        # adaptation trace still decays/updates during refractory
        n.a = (n.rho * n.a) + ((1 - n.rho) * n.z_t)
        n.recurrent_signal = n.recurrent_signal * 0.0
        # Exponential decay of v_t1 since the last spike
        n.v_t1 = n.v_t * n.alpha^(n.time_stamp - n.last_firing_time) # or n.v_t1 = n.alpha * n.v_t
        # no gradient flows while refractory
        n.phi = 0
    else
        # guard: z_t may be uninitialized (nothing) on the very first step
        n.z_t = isnothing(n.z_t) ? false : n.z_t
        # update adaptation trace, then raise the effective threshold by it
        n.a = (n.rho * n.a) + ((1 - n.rho) * n.z_t)
        n.av_th = n.v_th + (n.beta * n.a)
        n.recurrent_signal = sum(n.w_rec .* n.z_i_t) # signal from other neuron that this neuron subscribed
        n.alpha_v_t = n.alpha * n.v_t
        n.v_t1 = n.alpha_v_t + n.recurrent_signal
        if n.v_t1 > n.av_th
            n.z_t1 = true
            n.refractory_counter = n.refractory_duration
            n.firing_counter += 1
            # soft reset subtracts the base threshold, not the adaptive one
            n.v_t1 = n.v_t1 - n.v_th
        else
            n.z_t1 = false
        end
        # pseudo-derivative; there is a difference from lif formula (uses av_th)
        n.phi = (n.gamma_pd / n.v_th) * max(0, 1 - (n.v_t1 - n.av_th) / n.v_th)
    end
end
#------------------------------------------------------------------------------------------------100
""" linear_neuron forward()
In this implementation, each output neuron is fully connected to every lif and alif neuron.
"""
function (n::linear_neuron)(kfn::T) where T<:knowledgeFn
    n.time_stamp = kfn.time_stamp
    # read the t+1 firing state of the subscribed neurons
    # NOTE(review): only the first subscribed entry is used, despite the
    # "fully connected" docstring — confirm subscription_list has length 1 here
    n.out_t1 = first(kfn.snn_firing_state_t1[n.subscription_list])
end
end # end module