refactoring

This commit is contained in:
2023-05-12 19:50:02 +07:00
parent 668fa77595
commit 89371736e4
5 changed files with 382 additions and 400 deletions

View File

@@ -32,24 +32,22 @@ no_negative!(x) = x < 0.0 ? 0.0 : x
precision(x::Array{<:Array}) = ( std(mean.(x)) / mean(mean.(x)) ) * 100
# reset functions for LIF/ALIF neuron
reset_last_firing_time!(n::compute_neuron) = n.last_firing_time = 0.0
reset_last_firing_time!(n::compute_neuron) = n.lastFiringTime = 0.0
reset_refractory_state_active!(n::compute_neuron) = n.refractory_state_active = false
reset_v_t!(n::compute_neuron) = n.v_t = n.v_t_default
reset_z_t!(n::compute_neuron) = n.z_t = false
reset_epsilon_rec!(n::compute_neuron) = n.epsilon_rec = n.epsilon_rec * 0.0
reset_epsilon_rec_a!(n::alif_neuron) = n.epsilon_rec_a = n.epsilon_rec_a * 0.0
reset_epsilon_rec!(n::compute_neuron) = n.epsilonRec = n.epsilonRec * 0.0
reset_epsilon_rec_a!(n::alif_neuron) = n.epsilonRecA = n.epsilonRecA * 0.0
reset_epsilon_in!(n::compute_neuron) = n.epsilon_in = isnothing(n.epsilon_in) ? nothing : n.epsilon_in * 0.0
reset_error!(n::Union{compute_neuron, linear_neuron}) = n.error = nothing
reset_w_in_change!(n::compute_neuron) = n.w_in_change = isnothing(n.w_in_change) ? nothing : n.w_in_change * 0.0
reset_w_rec_change!(n::compute_neuron) = n.w_rec_change = n.w_rec_change * 0.0
reset_w_rec_change!(n::compute_neuron) = n.wRecChange = n.wRecChange * 0.0
reset_a!(n::alif_neuron) = n.a = n.a * 0.0
reset_reg_voltage_a!(n::compute_neuron) = n.reg_voltage_a = n.reg_voltage_a * 0.0
reset_reg_voltage_b!(n::compute_neuron) = n.reg_voltage_b = n.reg_voltage_b * 0.0
reset_reg_voltage_error!(n::compute_neuron) = n.reg_voltage_error = n.reg_voltage_error * 0.0
reset_firing_counter!(n::compute_neuron) = n.firing_counter = n.firing_counter * 0.0
reset_firing_diff!(n::Union{compute_neuron, linear_neuron}) = n.firing_diff = n.firing_diff * 0.0
reset_previous_error!(n::Union{compute_neuron}) =
n.previous_error = n.previous_error * 0.0
reset_firing_counter!(n::compute_neuron) = n.firingCounter = n.firingCounter * 0.0
reset_firing_diff!(n::Union{compute_neuron, linear_neuron}) = n.firingDiff = n.firingDiff * 0.0
# reset function for output neuron
reset_epsilon_j!(n::linear_neuron) = n.epsilon_j = n.epsilon_j * 0.0
@@ -151,7 +149,7 @@ end
function store_knowledgefn_error!(kfn::knowledgeFn)
# condition to adjust neuron in KFN plane in addition to weight adjustment inside each neuron
if kfn.learning_stage == "start_learning"
if kfn.learningStage == "start_learning"
if kfn.recent_knowledgeFn_error === nothing && kfn.knowledgeFn_error === nothing
kfn.recent_knowledgeFn_error = [[]]
elseif kfn.recent_knowledgeFn_error === nothing
@@ -161,13 +159,13 @@ function store_knowledgefn_error!(kfn::knowledgeFn)
else
push!(kfn.recent_knowledgeFn_error, [kfn.knowledgeFn_error])
end
elseif kfn.learning_stage == "during_learning"
elseif kfn.learningStage == "during_learning"
if kfn.knowledgeFn_error === nothing
#skip
else
push!(kfn.recent_knowledgeFn_error[end], kfn.knowledgeFn_error)
end
elseif kfn.learning_stage == "end_learning"
elseif kfn.learningStage == "end_learning"
if kfn.recent_knowledgeFn_error === nothing
#skip
else
@@ -184,15 +182,15 @@ end
function update_Bn!(kfn::knowledgeFn)
Δw = nothing
for n in kfn.output_neurons_array
for n in kfn.outputNeuronsArray
Δw = Δw === nothing ? n.w_out_change : Δw + n.w_out_change
n.w_out = n.w_out - (n.Bn_wout_decay * n.w_out) # w_out decay
end
# Δw = Δw / kfn.kfn_params[:linear_neuron_number] # average
# Δw = Δw / kfn.kfnParams[:linear_neuron_number] # average
input_neuron_number = kfn.kfn_params[:input_neuron_number] # skip input neuron
for i = 1:kfn.kfn_params[:compute_neuron_number]
n = kfn.neurons_array[input_neuron_number+i]
input_neuron_number = kfn.kfnParams[:input_neuron_number] # skip input neuron
for i = 1:kfn.kfnParams[:compute_neuron_number]
n = kfn.neuronsArray[input_neuron_number+i]
n.Bn = n.Bn + Δw[i]
n.Bn = n.Bn - (n.Bn_wout_decay * n.Bn) # w_out decay
end
@@ -208,7 +206,7 @@ function cal_v_reg!(n::lif_neuron)
component_b = n.v_t1 - n.v_th < 0 ? 0 : n.v_t1 - n.v_th
#FIXME: not sure the following line is correct
n.reg_voltage_b = n.reg_voltage_b + (component_b * n.epsilon_rec)
n.reg_voltage_b = n.reg_voltage_b + (component_b * n.epsilonRec)
end
function cal_v_reg!(n::alif_neuron)
@@ -219,7 +217,7 @@ function cal_v_reg!(n::alif_neuron)
component_b = n.v_t1 - n.av_th < 0 ? 0 : n.v_t1 - n.av_th
#FIXME: not sure the following line is correct
n.reg_voltage_b = n.reg_voltage_b + (component_b * (n.epsilon_rec - n.epsilon_rec_a))
n.reg_voltage_b = n.reg_voltage_b + (component_b * (n.epsilonRec - n.epsilonRecA))
end
function voltage_error!(n::compute_neuron)
@@ -232,23 +230,23 @@ function voltage_regulator!(n::compute_neuron) # running average
return Δw
end
function firing_rate_error(kfn::knowledgeFn)
start_id = kfn.kfn_params[:input_neuron_number] + 1
return 0.5 * sum([(n.firing_diff)^2 for n in kfn.neurons_array[start_id:end]])
function firingRateError(kfn::knowledgeFn)
start_id = kfn.kfnParams[:input_neuron_number] + 1
return 0.5 * sum([(n.firingDiff)^2 for n in kfn.neuronsArray[start_id:end]])
end
function firing_rate_regulator!(n::compute_neuron)
# n.firing_rate NOT running average (average over learning batch)
# n.firingRate NOT running average (average over learning batch)
Δw = n.optimiser.eta * n.c_reg *
(n.firing_rate - n.firing_rate_target) * n.e_rec
Δw = n.firing_rate > n.firing_rate_target ? Δw : Δw * 0.0
(n.firingRate - n.firingRateTarget) * n.eRec
Δw = n.firingRate > n.firingRateTarget ? Δw : Δw * 0.0
return Δw
end
firing_rate!(n::compute_neuron) = n.firing_rate = (n.firing_counter / n.time_stamp) * 1000
firing_diff!(n::compute_neuron) = n.firing_diff = n.firing_rate - n.firing_rate_target
firing_rate!(n::compute_neuron) = n.firingRate = (n.firingCounter / n.timeStep) * 1000
firing_diff!(n::compute_neuron) = n.firingDiff = n.firingRate - n.firingRateTarget
function neuroplasticity!(n::compute_neuron, firing_neurons_list::Vector)
function neuroplasticity!(n::compute_neuron, firedNeurons::Vector)
# if there is 0-weight then replace it with new connection
zero_weight_index = findall(iszero.(n.w_rec))
if length(zero_weight_index) != 0
@@ -257,8 +255,8 @@ function neuroplasticity!(n::compute_neuron, firing_neurons_list::Vector)
not fire = no information
"""
subscribe_options = filter(x -> x ∉ [n.id], firing_neurons_list) # exclude this neuron id from the list
filter!(x -> x ∉ n.subscription_list, subscribe_options) # exclude this neuron's subscription_list from the list
subscribe_options = filter(x -> x ∉ [n.id], firedNeurons) # exclude this neuron id from the list
filter!(x -> x ∉ n.subscriptionList, subscribe_options) # exclude this neuron's subscriptionList from the list
shuffle!(subscribe_options)
end
@@ -266,7 +264,7 @@ function neuroplasticity!(n::compute_neuron, firing_neurons_list::Vector)
percentage = [new_connection_percent, 100.0 - new_connection_percent] / 100.0
for i in zero_weight_index
if Utils.random_choices([true, false], percentage)
n.subscription_list[i] = pop!(subscribe_options)
n.subscriptionList[i] = pop!(subscribe_options)
n.w_rec[i] = 0.01 # new connection should not send large signal otherwise it would throw
# RSNN off path. Let weight grow by an optimiser
end
@@ -283,7 +281,7 @@ function push_epsilon_rec_a!(n::lif_neuron)
end
function push_epsilon_rec_a!(n::alif_neuron)
push!(n.epsilon_rec_a, 0)
push!(n.epsilonRecA, 0)
end