diff --git a/src/learn.jl b/src/learn.jl index cc17e6f..d5459b2 100644 --- a/src/learn.jl +++ b/src/learn.jl @@ -147,6 +147,7 @@ function learn!(n::alif_neuron, error::Number) ΔwRecChange = n.eta * error n.wRecChange = (n.subExInType * n.wRecChange) + ΔwRecChange reset_epsilonRec!(n) + reset_epsilonRecA!(n) end """ linear_neuron learn() diff --git a/src/snn_utils.jl b/src/snn_utils.jl index 5379163..32e0aa6 100644 --- a/src/snn_utils.jl +++ b/src/snn_utils.jl @@ -4,7 +4,7 @@ using Flux.Optimise: apply! export calculate_α, calculate_ρ, calculate_k, timestep_forward!, init_neuron, no_negative!, precision, calculate_w_change!, store_knowledgefn_error!, interneurons_adjustment!, reset_z_t!, resetLearningParams!, reset_learning_history_params!, reset_epsilonRec!, - cal_v_reg!, calculate_w_change_end!, + reset_epsilonRecA!, firing_rate_error!, firing_rate_regulator!, update_Bn!, cal_firing_reg!, neuroplasticity!, shakeup!, reset_learning_no_wchange!, adjust_internal_learning_rate!, gradient_withloss @@ -34,7 +34,7 @@ reset_v_t!(n::neuron) = n.v_t = n.vRest reset_z_t!(n::compute_neuron) = n.z_t = false reset_epsilonRec!(n::compute_neuron) = n.epsilonRec = n.epsilonRec * 0.0 reset_epsilonRec!(n::output_neuron) = n.epsilonRec = n.epsilonRec * 0.0 -reset_epsilon_rec_a!(n::alif_neuron) = n.epsilonRecA = n.epsilonRecA * 0.0 +reset_epsilonRecA!(n::alif_neuron) = n.epsilonRecA = n.epsilonRecA * 0.0 reset_epsilon_in!(n::compute_neuron) = n.epsilon_in = isnothing(n.epsilon_in) ? nothing : n.epsilon_in * 0.0 reset_error!(n::Union{compute_neuron, linear_neuron}) = n.error = nothing reset_w_in_change!(n::compute_neuron) = n.w_in_change = isnothing(n.w_in_change) ? 
nothing : n.w_in_change * 0.0 @@ -75,7 +75,7 @@ reset_b_change!(n::linear_neuron) = n.b_change = n.b_change * 0.0 # end # function reset_learning_no_wchange!(n::Union{alif_neuron, elif_neuron}) # reset_epsilonRec!(n) -# reset_epsilon_rec_a!(n) +# reset_epsilonRecA!(n) # reset_v_t!(n) # reset_z_t!(n) # # reset_a!(n) @@ -113,7 +113,7 @@ function resetLearningParams!(n::lif_neuron) end function resetLearningParams!(n::alif_neuron) reset_epsilonRec!(n) - reset_epsilon_rec_a!(n) + reset_epsilonRecA!(n) reset_wRecChange!(n) # reset_v_t!(n) # reset_z_t!(n) @@ -300,10 +300,17 @@ end function synapticConnStrength(n::compute_neuron) for (i, connStrength) in enumerate(n.synapticStrength) - n. - synapticConnStrength + # WORK IN PROGRESS + # check whether connStrength should increase or decrease based on usage from n.epsilonRec + + # compute the synaptic strength for this conn + + # apply the conn lower and upper limits + + # at the lower limit, set wRec at this position to 0 to allow a new random synaptic conn + end end