time-based learning method based on new error formula

This commit is contained in:
2023-05-16 20:56:05 +07:00
parent 114161ba69
commit 70d2521c5e
5 changed files with 146 additions and 227 deletions

View File

@@ -3,13 +3,13 @@ module snn_utils
using Flux.Optimise: apply!
export calculate_α, calculate_ρ, calculate_k, timestep_forward!, init_neuron, no_negative!,
precision, calculate_w_change!, store_knowledgefn_error!, interneurons_adjustment!,
reset_z_t!, reset_learning_params!, reset_learning_history_params!,
reset_z_t!, resetLearningParams!, reset_learning_history_params!,
cal_v_reg!, calculate_w_change_end!,
firing_rate_error!, firing_rate_regulator!, update_Bn!, cal_firing_reg!,
neuroplasticity!, shakeup!, reset_learning_no_wchange!, adjust_internal_learning_rate!,
gradient_withloss
using Statistics, Random, LinearAlgebra, Distributions, Zygote
using Statistics, Random, LinearAlgebra, Distributions, Zygote, Flux
using ..types
@@ -98,21 +98,19 @@ reset_b_change!(n::linear_neuron) = n.b_change = n.b_change * 0.0
""" Reset all learning-related params at the END of learning session
"""
function reset_learning_params!(n::lif_neuron)
function resetLearningParams!(n::lif_neuron)
reset_epsilon_rec!(n)
reset_w_rec_change!(n)
# reset_v_t!(n)
# reset_z_t!(n)
reset_firing_counter!(n)
reset_firing_diff!(n)
reset_previous_error!(n)
reset_error!(n)
# reset refractory state at the start/end of episode. Otherwise once neuron goes into
# refractory state, it will stay in refractory state forever
reset_refractoryCounter!(n)
end
function reset_learning_params!(n::alif_neuron)
function resetLearningParams!(n::alif_neuron)
reset_epsilon_rec!(n)
reset_epsilon_rec_a!(n)
reset_w_rec_change!(n)
@@ -121,8 +119,6 @@ function reset_learning_params!(n::alif_neuron)
# reset_a!(n)
reset_firing_counter!(n)
reset_firing_diff!(n)
reset_previous_error!(n)
reset_error!(n)
# reset refractory state at the start/end of episode. Otherwise once neuron goes into
# refractory state, it will stay in refractory state forever
@@ -132,18 +128,15 @@ end
# function reset_learning_no_wchange!(n::passthrough_neuron)
# end
""" No-op reset for `passthrough_neuron`: it carries no learning state. """
function resetLearningParams!(n::passthrough_neuron)
    # skip — passthrough neurons have no eligibility traces, firing
    # statistics, or refractory state to clear
end
#WORKING
function reset_learning_params!(n::linear_neuron)
function resetLearningParams!(n::linear_neuron)
reset_epsilon_rec!(n)
reset_w_rec_change!(n)
reset_v_t!(n)
reset_firing_counter!(n)
reset_firing_diff!(n)
reset_previous_error!(n)
reset_error!(n)
# reset refractory state at the start/end of episode. Otherwise once neuron goes into
# refractory state, it will stay in refractory state forever
@@ -288,14 +281,19 @@ function push_epsilon_rec_a!(n::alif_neuron)
push!(n.epsilonRecA, 0)
end
""" compute synaptic connection strength. bias will shift currentStrength to fit into
sigmoid operating range which centred at 0 and range is -37 to 37.
# Example
synaptic strength range is 0 to 10
one may use bias = -5 to transform synaptic strength into range -5 to 5
the return value is shifted back to original scale
"""
function synapticConnStrength(currentStrength::AbstractFloat, bias::Number=0)
currentStrength += bias
currentStrength - (1.0 - sigmoid(currentStrength))
currentStrength -= bias
return currentStrength
end