refactoring
This commit is contained in:
763
src/types.jl
Normal file
763
src/types.jl
Normal file
@@ -0,0 +1,763 @@
|
||||
module types
|
||||
|
||||
export
|
||||
# struct
|
||||
IronpenStruct, model, knowledgeFn, lif_neuron, alif_neuron, linear_neuron,
|
||||
kfn_1, compute_neuron, neuron, output_neuron, passthrough_neuron,
|
||||
|
||||
# function
|
||||
instantiate_custom_types, init_neuron, populate_neuron,
|
||||
add_neuron!
|
||||
|
||||
using Random, Flux, LinearAlgebra
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
abstract type Ironpen end
|
||||
abstract type knowledgeFn <: Ironpen end
|
||||
abstract type neuron <: Ironpen end
|
||||
abstract type input_neuron <: neuron end
|
||||
abstract type output_neuron <: neuron end
|
||||
abstract type compute_neuron <: neuron end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
"""
    model

Top-level mutable container for an Ironpen model: holds the knowledge functions,
the raw construction parameters, accumulated error values and the current
learning stage. Instances are normally built through the outer constructor
`model(params::Dict)`.
"""
Base.@kwdef mutable struct model <: Ironpen
    knowledgeFn::Union{Dict,Nothing} = nothing   # presumably maps kfn name => kfn instance and helpers — TODO confirm against callers
    model_params::Union{Dict,Nothing} = nothing  # raw params Dict this model was built from
    error::Union{Float64,Nothing} = 0.0          # scalar model error
    output_error::Union{Array,Nothing} = Vector{AbstractFloat}()  # per-output error values

    """ "inference" = no learning params will be collected.
    "learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
    correct answer is available then merge Δw_rec_change into w_rec_change then
    reset epsilon_j.
    "reflect" = neuron will merge w_rec_change into w_rec then reset w_rec_change. """
    learning_stage::String = "inference"

    softreset::Bool = false      # NOTE(review): soft-reset request flag — semantics handled elsewhere, confirm
    time_stamp::Number = 0.0     # current simulation time
end
|
||||
"""
    model(params::Dict)

Outer constructor: build a `model`, remember the raw `params` Dict in
`model_params`, then copy every entry of `params` whose key matches a field name
onto that field. Keys that do not match a field are silently ignored.

# Example

    model_params_1 = Dict(:knowledgeFn => Dict(:I => I_kfn, :run => run_kfn),
                          :learning_stage => "inference",)
    model_1 = model(model_params_1)
"""
function model(params::Dict)
    m = model()
    m.model_params = params

    # Copy matching params onto struct fields. `setproperty!` replaces the
    # original `m.:($i) = params[i]` interpolation trick — same behavior,
    # idiomatic and easier to read.
    for field in fieldnames(typeof(m))
        if haskey(params, field)
            setproperty!(m, field, params[field])
        end
    end

    return m
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
"""
    kfn_1

Concrete knowledge-function struct: owns the neuron populations (compute neurons
in `neurons_array`, readout neurons in `output_neurons_array`), the error
projection coefficients `Bn`, per-timestep firing state and running display
statistics. Built through the outer constructor `kfn_1(kfn_params::Dict)`.
"""
Base.@kwdef mutable struct kfn_1 <: knowledgeFn
    knowledgefn_name::Union{String,Nothing} = nothing
    kfn_params::Union{Dict,Nothing} = nothing # params this kfn was built from, kept for later use
    time_stamp::Number = 0.0

    # Bn holds the error coefficients for both neurons and output neurons in one place
    Bn::Vector{Float64} = Vector{Float64}() # error projection coefficient from the kfn output's error to each neuron's error
    neurons_array::Union{Array,Nothing} = [] # input + compute neurons live here; a neuron's id is its index

    """ Output neurons are kept separately because
    1. their calculation differs from the other neuron types,
    2. other neuron types are never induced to connect to an output neuron,
    3. output neurons are never induced to connect to their own type. """
    output_neurons_array::Union{Array,Nothing} = []

    """ "inference" = no learning params will be collected.
    "learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
    correct answer is available then merge Δw_rec_change into w_rec_change then
    reset epsilon_j.
    "reflect" = neuron will merge w_rec_change into w_rec then reset w_rec_change. """
    learning_stage::String = "inference"

    error::Union{Float64,Nothing} = nothing
    output_error::Union{Array,Nothing} = Vector{AbstractFloat}()
    recent_knowledgeFn_error::Union{Any,Nothing} = nothing
    softreset::Bool = false
    meta_params::Union{Dict{Any,Any},Nothing} = Dict()

    firing_neurons_list::Array{Int64} = Vector{Int64}() # ids of neurons that fired
    snn_firing_state_t0::Union{Vector{Bool},Nothing} = nothing # firing state of all neurons at t0
    snn_firing_state_t1::Union{Vector{Bool},Nothing} = nothing # firing state of all neurons at t1

    avg_neurons_firing_rate::Union{Float64,Nothing} = 0.0 # for displaying the average firing rate over all neurons
    avg_neurons_v_t1::Union{Float64,Nothing} = 0.0 # for displaying the average v_t1 over all neurons
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
"""
    kfn_1(kfn_params::Dict)

Outer constructor: auto-generate a `kfn_1` knowledge function from `kfn_params`.

Keys read here (and by `init_neuron!` downstream):

- `:knowledgefn_name`           — name of this knowledge function
- `:total_input_port`           — number of input (pass-through) neurons
- `:compute_neuron_number`      — number of compute neurons; must be ≥ `:total_input_port`
- `:Bn`                         — `"random"` or a fixed value; error projection coefficient per compute neuron
- `:input_port`, `:compute_neuron` — Dicts of type-name => Dict(:numbers, :params)
- `:output_port`                — Dict with `:numbers` and `:params` for the readout neurons
- `:neuron_firing_rate_target`  — firing-rate regularization target for compute neurons
- `:total_neurons`, per-type `:synaptic_connection_number` — consumed by `init_neuron!`

Throws an `ErrorException` when there are fewer compute neurons than input ports.
Returns the populated `kfn_1`.
"""
function kfn_1(kfn_params::Dict)
    kfn = kfn_1()
    kfn.kfn_params = kfn_params
    kfn.knowledgefn_name = kfn.kfn_params[:knowledgefn_name]

    if kfn.kfn_params[:compute_neuron_number] < kfn.kfn_params[:total_input_port]
        # `error` already throws; the previous `throw(error(...))` was redundant
        error("number of compute neuron must be greater than input neuron")
    end

    # Bn: error projection coefficient from the kfn output's error to each neuron's error
    if kfn.kfn_params[:Bn] == "random"
        kfn.Bn = [Random.rand(0:0.001:1) for i in 1:kfn.kfn_params[:compute_neuron_number]]
    else # manual specification: broadcast the one given value to every compute neuron
        kfn.Bn = [kfn.kfn_params[:Bn] for i in 1:kfn.kfn_params[:compute_neuron_number]]
    end

    # Neuron ids are simply their positions in kfn.neurons_array.

    # add input-port (pass-through) neurons
    for (_, port_cfg) in kfn.kfn_params[:input_port]
        for _ = 1:port_cfg[:numbers]
            n_id = length(kfn.neurons_array) + 1
            # local renamed from `neuron` — that name shadowed the abstract type
            new_n = init_neuron(n_id, port_cfg[:params], kfn.kfn_params)
            push!(kfn.neurons_array, new_n)
        end
    end

    # add compute neurons (appended after the input neurons)
    for (_, type_cfg) in kfn.kfn_params[:compute_neuron]
        for _ = 1:type_cfg[:numbers]
            n_id = length(kfn.neurons_array) + 1
            new_n = init_neuron(n_id, type_cfg[:params], kfn.kfn_params)
            push!(kfn.neurons_array, new_n)
        end
    end

    # add output neurons (kept separately; their ids restart from 1)
    for i = 1:kfn.kfn_params[:output_port][:numbers]
        new_on = init_neuron(i, kfn.kfn_params[:output_port][:params],
                             kfn.kfn_params)
        push!(kfn.output_neurons_array, new_on)
    end

    # each output port subscribes to exactly one randomly chosen compute neuron
    sub_list = shuffle!([kfn.kfn_params[:total_input_port]+1:length(kfn.neurons_array)...])
    sub_output_neuron = [pop!(sub_list) for i in 1:kfn.kfn_params[:output_port][:numbers]]
    for out_n in kfn.output_neurons_array
        out_n.subscription_list = [pop!(sub_output_neuron)]
    end

    # propagate the firing-rate regularization target to every compute neuron
    for n in kfn.neurons_array
        if n isa compute_neuron   # idiomatic form of typeof(n) <: compute_neuron
            n.firing_rate_target = kfn.kfn_params[:neuron_firing_rate_target]
        end
    end

    # excitatory : inhibitory = 60 : 40 of the compute neurons
    ex_number = Int(floor(0.6 * kfn.kfn_params[:compute_neuron_number]))
    in_number = kfn.kfn_params[:compute_neuron_number] - ex_number
    ex_in = shuffle!([fill(1, ex_number); fill(-1, in_number)])

    # Input neurons are always excitatory; compute neurons draw randomly from ex_in.
    # Iterating in reverse reaches compute neurons (appended last) first; once ex_in
    # is exhausted, pop! throws and the remaining (input) neurons keep ExIn_type = 1.
    for n in reverse(kfn.neurons_array)
        try n.ExIn_type = pop!(ex_in) catch end
    end

    # Record each subscription's ExIn type on the subscriber. Input neurons have no
    # subscription_list field, so the resulting error is deliberately swallowed.
    for n in reverse(kfn.neurons_array)
        try
            for sub_id in n.subscription_list
                push!(n.sub_ExIn_type, kfn.neurons_array[sub_id].ExIn_type)
            end
        catch
        end
    end

    return kfn
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
"""
    passthrough_neuron

Input neuron: carries only identity and firing state — no weights, no
subscriptions. Presumably forwards its input spike unchanged (the forward pass
lives elsewhere — confirm against the runtime code). Always excitatory.
"""
Base.@kwdef mutable struct passthrough_neuron <: input_neuron
    id::Union{Int64,Nothing} = nothing # ID = this neuron's position in the knowledgeFn neurons array
    type::String = "passthrough_neuron"
    knowledgefn_name::Union{String,Nothing} = nothing # knowledgeFn this neuron belongs to
    z_t::Bool = false  # firing state at previous timestep
    z_t1::Bool = false # firing state at current timestep
    time_stamp::Number = 0.0 # current time
    ExIn_type::Integer = 1 # 1 = excitatory, -1 = inhibitory; input neurons are always excitatory
end
|
||||
|
||||
"""
    passthrough_neuron(params::Dict)

Outer constructor: build a `passthrough_neuron` with default field values, then
overwrite every field that has a matching key in `params`. (The `:optimiser`
branch mirrors the other neuron constructors; `passthrough_neuron` declares no
`optimiser` field, so it is never taken here.)
"""
function passthrough_neuron(params::Dict)
    n = passthrough_neuron()
    for field in fieldnames(typeof(n))
        haskey(params, field) || continue
        if field === :optimiser
            # the optimiser is named by a dotted path string; keep only the last part
            opt_name = string(split(params[field], ".")[end])
            setproperty!(n, field, load_optimiser(opt_name))
        else
            setproperty!(n, field, params[field])
        end
    end
    return n
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
"""
    lif_neuron

Leaky integrate-and-fire compute neuron with e-prop style eligibility traces.
The membrane potential decays by `alpha = exp(-delta / tau_m)` (see
`calculate_α`) and spikes are recorded in `z_t`/`z_t1`. Build instances through
the outer constructor `lif_neuron(params::Dict)`.
"""
Base.@kwdef mutable struct lif_neuron <: compute_neuron
    id::Union{Int64,Nothing} = nothing # this neuron's ID, i.e. its position in the knowledgeFn neurons array
    type::String = "lif_neuron"
    ExIn_type::Integer = 1 # 1 = excitatory, -1 = inhibitory
    # Bn::Union{Float64,Nothing} = Random.rand() # Bias for neuron error
    knowledgefn_name::Union{String,Nothing} = nothing # knowledgeFn this neuron belongs to
    subscription_list::Union{Array{Int64},Nothing} = nothing # ids of other neurons whose spikes this neuron receives
    sub_ExIn_type::Array{Int64} = Vector{Int64}() # ExIn type of each subscribed neuron
    time_stamp::Number = 0.0 # current time
    w_rec::Union{Array{Float64},Nothing} = nothing # recurrent synaptic weights, one per subscription
    v_t::Float64 = 0.0 # vᵗ, membrane potential at the previous timestep
    v_t1::Float64 = 0.0 # vᵗ⁺¹, membrane potential at the current timestep
    v_t_default::Union{Float64,Nothing} = 0.0 # default (reset) membrane potential
    v_th::Float64 = 1.0 # vᵗʰ, firing threshold
    z_t::Bool = false # zᵗ, postsynaptic firing at the previous timestep
    # zᵗ⁺¹ is held separately: each neuron's forward pass reads other neurons' z_t,
    # so the new firing state must not overwrite z_t until every neuron has updated.
    z_t1::Bool = false # postsynaptic firing at the current timestep (after this neuron's calculation)
    z_i_t::Union{Array{Bool},Nothing} = nothing # presynaptic firing at the current timestep (= subscribed neurons' z_t)
    # Bn_wout_decay::Union{Float64,Nothing} = 0.01 # use to balance Bn and w_out

    gamma_pd::Union{Float64,Nothing} = 0.3 # γ_pd, discount factor (value from the paper)
    alpha::Union{Float64,Nothing} = nothing # α, membrane potential decay factor
    phi::Union{Float64,Nothing} = nothing # ϕ, pseudo derivative
    epsilon_rec::Union{Array{Float64},Nothing} = nothing # ϵ_rec, eligibility vector for neuron spikes
    decayed_epsilon_rec::Union{Array{Float64},Nothing} = nothing # α * epsilon_rec
    e_rec::Union{Array{Float64},Nothing} = nothing # eligibility trace for neuron spikes
    delta::Union{Float64,Nothing} = 1.0 # δ, discrete timestep size in milliseconds
    last_firing_time::Union{Float64,Nothing} = 0.0 # time of the most recent spike
    refractory_duration::Union{Float64,Nothing} = 3 # refractory period in milliseconds
    # refractory_state_active::Union{Bool,Nothing} = false # if true, neuron is in refractory state and cannot process new information
    refractory_counter::Integer = 0 # remaining refractory steps — TODO confirm unit against the forward pass
    tau_m::Union{Float64,Nothing} = nothing # τ_m, membrane time constant in milliseconds
    eta::Union{Float64,Nothing} = 0.01 # η, learning rate
    w_rec_change::Union{Array{Float64},Nothing} = nothing # Δw_rec, accumulated w_rec change
    recurrent_signal::Union{Float64,Nothing} = nothing # incoming recurrent signal
    alpha_v_t::Union{Float64,Nothing} = nothing # alpha * v_t
    voltage_drop_percentage::Union{Float64,Nothing} = 1.0 # voltage drop as a percentage of v_th
    error::Union{Float64,Nothing} = nothing # local neuron error
    optimiser::Union{Any,Nothing} = load_optimiser("AdaBelief") # Flux optimiser instance

    firing_counter::Float64 = 0.0 # number of times the neuron has fired
    firing_rate_target::Float64 = 20.0 # target firing rate in Hz (regularization)
    firing_diff::Float64 = 0.0 # e-prop supplement paper, equation 5
    firing_rate_error::Float64 = 0.0 # local error w.r.t. the firing-rate regularization
    firing_rate::Float64 = 0.0 # running average of the firing rate in Hz

    current_error::Union{Float64,Nothing} = 0.0
    previous_error::Union{Float64,Nothing} = 0.0
    error_diff::Union{Array{Float64},Nothing} = Vector{Float64}()

    """ "inference" = no learning params will be collected.
    "learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
    correct answer is available then merge Δw_rec_change into w_rec_change then
    reset epsilon_j.
    "reflect" = neuron will merge w_rec_change into w_rec then reset w_rec_change. """
    learning_stage::String = "inference"
end
|
||||
|
||||
"""
    lif_neuron(params::Dict)

Outer constructor: build a `lif_neuron` with default field values, then overwrite
every field that has a matching key in `params`. A `:optimiser` entry is a dotted
path string; only its final component is handed to `load_optimiser`.

# Example

    neuron1 = lif_neuron(Dict(
        :type => "lif_neuron",
        :v_th => 1.2,                 # firing threshold
        :z_t => false,                # firing status at time t
        :gamma_pd => 0.3,             # discount factor (paper value)
        :refractory_duration => 2.0,  # refractory period
        :delta => 1.0,
        :tau_m => 5.0,                # membrane time constant (ms)
    ))
"""
function lif_neuron(params::Dict)
    n = lif_neuron()
    for field in fieldnames(typeof(n))
        haskey(params, field) || continue
        if field === :optimiser
            opt_name = string(split(params[field], ".")[end])
            setproperty!(n, field, load_optimiser(opt_name))
        else
            setproperty!(n, field, params[field])
        end
    end
    return n
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
"""
    alif_neuron

Adaptive leaky integrate-and-fire compute neuron: a `lif_neuron` extended with a
spike-driven threshold adaptation `a` (decaying by `rho = exp(-delta / tau_a)`,
see `calculate_ρ`) and the adjusted threshold `av_th`. Build instances through
the outer constructor `alif_neuron(params::Dict)`.
"""
Base.@kwdef mutable struct alif_neuron <: compute_neuron
    id::Union{Int64,Nothing} = nothing # this neuron's ID, i.e. its position in the knowledgeFn neurons array
    type::String = "alif_neuron"
    ExIn_type::Integer = -1 # 1 = excitatory, -1 = inhibitory
    # Bn::Union{Float64,Nothing} = Random.rand() # Bias for neuron error
    knowledgefn_name::Union{String,Nothing} = nothing # knowledgeFn this neuron belongs to
    subscription_list::Union{Array{Int64},Nothing} = nothing # ids of other neurons whose spikes this neuron receives
    sub_ExIn_type::Array{Int64} = Vector{Int64}() # ExIn type of each subscribed neuron
    time_stamp::Union{Number,Nothing} = nothing # current time
    w_rec::Union{Array{Float64},Nothing} = nothing # recurrent synaptic weights, one per subscription
    v_t::Float64 = 0.0 # vᵗ, membrane potential at the previous timestep
    v_t1::Float64 = 0.0 # vᵗ⁺¹, membrane potential at the current timestep
    v_t_default::Union{Float64,Nothing} = 0.0 # default (reset) membrane potential
    v_th::Float64 = 1.0 # vᵗʰ, base firing threshold
    z_t::Bool = false # zᵗ, postsynaptic firing at the previous timestep
    # zᵗ⁺¹ is held separately: each neuron's forward pass reads other neurons' z_t,
    # so the new firing state must not overwrite z_t until every neuron has updated.
    z_t1::Bool = false # postsynaptic firing at the current timestep (after this neuron's calculation)
    z_i_t::Union{Array{Bool},Nothing} = nothing # presynaptic firing at the current timestep (= subscribed neurons' z_t)
    # Bn_wout_decay::Union{Float64,Nothing} = 0.01 # use to balance Bn and w_out

    alpha::Union{Float64,Nothing} = nothing # α, membrane potential decay factor
    delta::Union{Float64,Nothing} = 1.0 # δ, discrete timestep size in milliseconds
    epsilon_rec::Union{Array{Float64},Nothing} = nothing # ϵ_rec(v), eligibility vector for neuron spikes
    epsilon_rec_a::Union{Array{Float64},Nothing} = nothing # ϵ_rec(a), adaptation component of the eligibility vector
    decayed_epsilon_rec::Union{Array{Float64},Nothing} = nothing # α * epsilon_rec
    e_rec_v::Union{Array{Float64},Nothing} = nothing # eligibility-trace component from v_t
    e_rec_a::Union{Array{Float64},Nothing} = nothing # eligibility-trace component from av_th
    e_rec::Union{Array{Float64},Nothing} = nothing # the neuron's eligibility trace
    eta::Union{Float64,Nothing} = 0.01 # η, learning rate
    gamma_pd::Union{Float64,Nothing} = 0.3 # γ_pd, discount factor (value from the paper)
    last_firing_time::Union{Float64,Nothing} = 0.0 # time of the most recent spike
    phi::Union{Float64,Nothing} = nothing # ϕ, pseudo derivative
    refractory_duration::Union{Float64,Nothing} = 3 # refractory period in milliseconds
    # refractory_state_active::Union{Bool,Nothing} = false # if true, neuron is in refractory state and cannot process new information
    refractory_counter::Integer = 0 # remaining refractory steps — TODO confirm unit against the forward pass
    tau_m::Union{Float64,Nothing} = nothing # τ_m, membrane time constant in milliseconds
    w_rec_change::Union{Array{Float64},Nothing} = nothing # Δw_rec, accumulated w_rec change
    recurrent_signal::Union{Float64,Nothing} = nothing # incoming recurrent signal
    alpha_v_t::Union{Float64,Nothing} = nothing # alpha * v_t
    voltage_drop_percentage::Union{Float64,Nothing} = 1.0 # voltage drop as a percentage of v_th
    error::Union{Float64,Nothing} = nothing # local neuron error
    optimiser::Union{Any,Nothing} = load_optimiser("AdaBelief") # Flux optimiser instance

    firing_counter::Float64 = 0.0 # number of times the neuron has fired
    firing_rate_target::Float64 = 20.0 # target firing rate in Hz (regularization)
    firing_diff::Float64 = 0.0 # e-prop supplement paper, equation 5
    firing_rate_error::Float64 = 0.0 # local error w.r.t. the firing-rate regularization
    firing_rate::Float64 = 0.0 # running average of the firing rate in Hz

    current_error::Union{Float64,Nothing} = 0.0
    previous_error::Union{Float64,Nothing} = 0.0
    error_diff::Union{Array{Float64},Nothing} = Vector{Float64}()

    tau_a::Union{Float64,Nothing} = nothing # τ_a, adaptation time constant in milliseconds
    beta::Union{Float64,Nothing} = 0.15 # β, constant (value from the paper)
    rho::Union{Float64,Nothing} = nothing # ρ, threshold adaptation decay factor
    a::Union{Float64,Nothing} = 0.0 # threshold adaptation state
    av_th::Union{Float64,Nothing} = nothing # adjusted firing threshold

    """ "inference" = no learning params will be collected.
    "learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
    correct answer is available then merge Δw_rec_change into w_rec_change then
    reset epsilon_j.
    "reflect" = neuron will merge w_rec_change into w_rec then reset w_rec_change. """
    learning_stage::String = "inference"

end
|
||||
"""
    alif_neuron(params::Dict)

Outer constructor: build an `alif_neuron` with default field values, then
overwrite every field that has a matching key in `params`. A `:optimiser` entry
is a dotted path string; only its final component is handed to `load_optimiser`.

# Example

    neuron1 = alif_neuron(Dict(
        :type => "alif_neuron",
        :v_th => 1.2,
        :z_t => false,
        :gamma_pd => 0.3,
        :refractory_duration => 2.0,
        :delta => 1.0,
        :tau_m => 5.0,    # membrane time constant (ms)
        :tau_a => 10.0,   # adaptation time constant (ms)
        :beta => 0.15,
        :a => 0.0,
    ))
"""
function alif_neuron(params::Dict)
    n = alif_neuron()
    for field in fieldnames(typeof(n))
        haskey(params, field) || continue
        if field === :optimiser
            opt_name = string(split(params[field], ".")[end])
            setproperty!(n, field, load_optimiser(opt_name))
        else
            setproperty!(n, field, params[field])
        end
    end
    return n
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
"""
    linear_neuron

Readout (output) neuron. Subscribes to compute neurons and exposes its value in
`out_t`/`out_t1`.

NOTE(review): `out_t`/`out_t1` are declared `Bool`, yet the outer-constructor
example passes `:out => 0.0`; additionally `calculate_k` reads a `tau_out` field
and `add_n_output_n!` pushes to `epsilon_j`/`w_out`/`w_out_change` — none of
which are declared here. Confirm whether those fields were dropped during
refactoring.
"""
Base.@kwdef mutable struct linear_neuron <: output_neuron
    id::Union{Int64,Nothing} = nothing # ID = this neuron's position in the knowledgeFn output array
    type::String = "linear_neuron"
    knowledgefn_name::Union{String,Nothing} = nothing # knowledgeFn this neuron belongs to
    subscription_list::Union{Array{Int64},Nothing} = nothing # ids of neurons this output reads from
    time_stamp::Union{Number,Nothing} = nothing # current time
    delta::Union{Float64,Nothing} = 1.0 # δ, discrete timestep size in milliseconds
    out_t::Bool = false  # output of the linear neuron BEFORE forward()
    out_t1::Bool = false # output of the linear neuron AFTER forward()
end
|
||||
|
||||
"""
    linear_neuron(params::Dict)

Outer constructor: build a `linear_neuron` with default field values, then
overwrite every field that has a matching key in `params`. A `:optimiser` entry
is a dotted path string; only its final component is handed to `load_optimiser`.

# Example

    neuron1 = linear_neuron(Dict(
        :type => "linear_neuron",
        :k => 0.9,       # output leaking coefficient
        :tau_out => 5.0, # output time constant (ms)
        :out => 0.0,     # the neuron's output value
    ))
"""
function linear_neuron(params::Dict)
    n = linear_neuron()
    for field in fieldnames(typeof(n))
        haskey(params, field) || continue
        if field === :optimiser
            opt_name = string(split(params[field], ".")[end])
            setproperty!(n, field, load_optimiser(opt_name))
        else
            setproperty!(n, field, params[field])
        end
    end

    return n
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
"""
    load_optimiser(optimiser_name::String; params = nothing)

Return a freshly constructed Flux optimiser for `optimiser_name` — "AdaBelief"
(η = 0.01) or "AdaBelief2" (η = 0.007) — and throw an `ErrorException` for any
other name.

NOTE(review): the `params` keyword is currently ignored; both branches use
hard-coded (η, (β₁, β₂)) settings. Confirm whether honoring it is intended.
"""
function load_optimiser(optimiser_name::String; params::Union{Dict,Nothing} = nothing)
    if optimiser_name == "AdaBelief"
        return Flux.Optimise.AdaBelief(0.01, (0.9, 0.8))
    elseif optimiser_name == "AdaBelief2"
        # Output neurons need a slower pace, so η is lower (0.007) than for compute
        # neurons: if w_out changes too fast, the compute neurons cannot grasp the
        # output neuron's moving direction — the two drift out of sync.
        return Flux.Optimise.AdaBelief(0.007, (0.9, 0.8))
    else
        error("optimiser is not defined yet in load_optimiser()")
    end
end
|
||||
|
||||
# Initialise a pass-through input neuron in place: it carries no weights or
# subscriptions, so only its id and owning knowledgeFn name are set. `n_params`
# is unused here but kept for a uniform init_neuron! signature across neuron types.
function init_neuron!(id::Int64, n::passthrough_neuron, n_params::Dict, kfn_params::Dict)
    n.id = id
    n.knowledgefn_name = kfn_params[:knowledgefn_name]
end
|
||||
|
||||
# function init_neuron!(id::Int64, n::lif_neuron, kfn_params::Dict)
|
||||
# n.id = id
|
||||
# n.knowledgefn_name = kfn_params[:knowledgefn_name]
|
||||
# subscription_options = shuffle!([1:(kfn_params[:input_neuron_number]+kfn_params[:compute_neuron_number])...])
|
||||
# if typeof(kfn_params[:synaptic_connection_number]) == String
|
||||
# percent = parse(Int, kfn_params[:synaptic_connection_number][1:end-1]) / 100
|
||||
# synaptic_connection_number = floor(length(subscription_options) * percent)
|
||||
# n.subscription_list = [pop!(subscription_options) for i = 1:synaptic_connection_number]
|
||||
# end
|
||||
# filter!(x -> x != n.id, n.subscription_list)
|
||||
# n.epsilon_rec = zeros(length(n.subscription_list))
|
||||
# n.w_rec = Random.rand(length(n.subscription_list))
|
||||
# n.w_rec_change = zeros(length(n.subscription_list))
|
||||
# n.reg_voltage_b = zeros(length(n.subscription_list))
|
||||
# n.alpha = calculate_α(n)
|
||||
# end
|
||||
|
||||
# Initialise a LIF neuron in place: assign identity, draw a random subscription
# set sized as a percentage of all neurons, allocate the eligibility/weight
# buffers and precompute the membrane decay factor α.
function init_neuron!(id::Int64, n::lif_neuron, n_params::Dict, kfn_params::Dict)
    n.id = id
    n.knowledgefn_name = kfn_params[:knowledgefn_name]

    total = kfn_params[:total_neurons]
    candidates = shuffle!(collect(1:total))
    # :synaptic_connection_number is a percentage of the total neuron count
    wanted = Int(floor(n_params[:synaptic_connection_number] * total / 100.0))
    n.subscription_list = [pop!(candidates) for _ = 1:wanted]

    # a neuron never subscribes to itself
    filter!(x -> x != n.id, n.subscription_list)

    fan_in = length(n.subscription_list)
    n.epsilon_rec = zeros(fan_in)
    n.w_rec = Random.rand(fan_in)
    n.w_rec_change = zeros(fan_in)
    # n.reg_voltage_b = zeros(fan_in)
    n.alpha = calculate_α(n)
end
|
||||
|
||||
# Initialise an ALIF neuron in place: assign identity, draw a random subscription
# set sized as a percentage of all neurons, allocate the eligibility/weight
# buffers and precompute both decay factors (membrane α and threshold ρ).
function init_neuron!(id::Int64, n::alif_neuron, n_params::Dict,
                      kfn_params::Dict)
    n.id = id
    n.knowledgefn_name = kfn_params[:knowledgefn_name]

    total = kfn_params[:total_neurons]
    candidates = shuffle!(collect(1:total))
    # :synaptic_connection_number is a percentage of the total neuron count
    wanted = Int(floor(n_params[:synaptic_connection_number] * total / 100.0))
    n.subscription_list = [pop!(candidates) for _ = 1:wanted]

    # a neuron never subscribes to itself
    filter!(x -> x != n.id, n.subscription_list)

    fan_in = length(n.subscription_list)
    n.epsilon_rec = zeros(fan_in)
    n.w_rec = Random.rand(fan_in)
    n.w_rec_change = zeros(fan_in)
    # n.reg_voltage_b = zeros(fan_in)
    # the more time has passed since the neuron was last activated, the more its
    # membrane potential (α) and adaptive threshold (ρ) have decayed
    n.alpha = calculate_α(n)
    n.rho = calculate_ρ(n)
    n.epsilon_rec_a = zeros(fan_in)
end
|
||||
|
||||
# function init_neuron!(id::Int64, n::linear_neuron, kfn_params::Dict)
|
||||
# n.id = id
|
||||
# n.knowledgefn_name = kfn_params[:knowledgefn_name]
|
||||
# start_id = kfn_params[:input_neuron_number] + 1 # don't readout from input neurons
|
||||
# n.subscription_list = [start_id:(start_id+kfn_params[:compute_neuron_number]-1)...]
|
||||
# n.epsilon_j = zeros(length(n.subscription_list))
|
||||
# n.w_out = Random.randn(length(n.subscription_list))
|
||||
# n.w_out_change = zeros(length(n.subscription_list))
|
||||
# n.b = Random.randn()
|
||||
# n.b_change = 0.0
|
||||
# n.k = calculate_k(n)
|
||||
# end
|
||||
# WORKING — mid-refactor: only identity fields are set here. The output neuron's
# subscription is currently assigned by the kfn_1 outer constructor instead, and
# the commented-out readout-weight initialisation below is not active.
function init_neuron!(id::Int64, n::linear_neuron, n_params::Dict, kfn_params::Dict)
    n.id = id
    n.knowledgefn_name = kfn_params[:knowledgefn_name]
    # start_id = kfn_params[:total_input_port] + 1 # don't readout from input neurons
    # subscription_options = [start_id:(start_id+kfn_params[:total_compute_neuron]-1)...]
    # n.subscription_list = [rand(subscription_options)]

    # n.epsilon_j = zeros(length(n.subscription_list))
    # n.w_out = Random.randn(length(n.subscription_list))
    # n.w_out_change = zeros(length(n.subscription_list))
    # n.b = Random.randn()
    # n.b_change = 0.0
    # n.k = calculate_k(n)
end
|
||||
|
||||
"""
    init_neuron(id::Int64, n_params::Dict, kfn_params::Dict)

Make a neuron for use inside a knowledgeFn: instantiate the concrete type named
by `n_params[:type]`, initialise it in place (id, subscriptions, weights via
`init_neuron!`) and return it.
"""
function init_neuron(id::Int64, n_params::Dict, kfn_params::Dict)
    fresh = instantiate_custom_types(n_params)
    init_neuron!(id, fresh, n_params, kfn_params)
    return fresh
end
|
||||
|
||||
"""
    instantiate_custom_types(params)

Instantiate an Ironpen type from `params[:type]`. The type name may be a dotted
path ("Module.lif_neuron"); only the last component is used. Returns `nothing`
when the type name is unknown.

Throws an `ArgumentError` when `params` is `nothing` (previously this crashed
with a `MethodError` on `params[:type]`).

# Example

    new_neuron = instantiate_custom_types(Dict(:type => "lif_neuron"))
"""
function instantiate_custom_types(params::Union{Dict,Nothing} = nothing)
    # fail fast with a clear message instead of indexing into `nothing`
    params === nothing &&
        throw(ArgumentError("instantiate_custom_types requires a params Dict with a :type entry"))
    type = string(split(params[:type], ".")[end])

    if type == "model"
        return model()
    elseif type == "knowledgeFn"
        # NOTE(review): `knowledgeFn` is an abstract type, so calling it throws a
        # MethodError at runtime. The concrete struct is `kfn_1` — confirm the
        # intended target before relying on this branch.
        return knowledgeFn()
    elseif type == "passthrough_neuron"
        return passthrough_neuron(params)
    elseif type == "lif_neuron"
        return lif_neuron(params)
    elseif type == "alif_neuron"
        return alif_neuron(params)
    elseif type == "linear_neuron"
        return linear_neuron(params)
    else
        return nothing
    end
end
|
||||
|
||||
""" Add a new neuron into a knowledgeFn
|
||||
|
||||
# Example
|
||||
add_neuron!(kfn.kfn_params[:lif_neuron_params], kfn)
|
||||
"""
|
||||
# function add_neuron!(neuron_Dict::Dict, kfn::knowledgeFn)
|
||||
# id = length(kfn.neurons_array) + 1
|
||||
# neuron = init_neuron(id, neuron_Dict, kfn.kfn_params,
|
||||
# total_neurons = (length(kfn.neurons_array) + 1))
|
||||
# push!(kfn.neurons_array, neuron)
|
||||
|
||||
# # Randomly select an output neuron to add a new neuron to
|
||||
# add_n_output_n!(Random.rand(kfn.output_neurons_array), id)
|
||||
# end
|
||||
|
||||
"""
    add_n_output_n!(o_n::linear_neuron, id::Int64)

Add the neuron with `id` to an output neuron's `subscription_list` and grow the
matching per-subscription learning buffers (eligibility, readout weight, weight
change) by one slot.

NOTE(review): `epsilon_j`, `w_out` and `w_out_change` are not declared on the
current `linear_neuron` struct, so this function will throw at runtime — confirm
whether those fields were lost during refactoring.
"""
function add_n_output_n!(o_n::linear_neuron, id::Int64)
    push!(o_n.subscription_list, id)
    push!(o_n.epsilon_j, 0.0)
    # Random.randn() draws a single scalar directly — replaces Random.randn(1)[1],
    # which allocated a one-element vector just to index it
    push!(o_n.w_out, Random.randn())
    push!(o_n.w_out_change, 0.0)
end
|
||||
|
||||
# Exponential decay factors, all of the form exp(-δ/τ) for timestep size δ and
# the relevant time constant τ. LIF and ALIF share the same membrane decay, so a
# single Union method replaces the two previously duplicated definitions.
calculate_α(neuron::Union{lif_neuron,alif_neuron}) = exp(-neuron.delta / neuron.tau_m) # membrane potential decay
calculate_ρ(neuron::alif_neuron) = exp(-neuron.delta / neuron.tau_a) # threshold adaptation decay
# NOTE(review): linear_neuron declares no tau_out field — confirm before calling.
calculate_k(neuron::linear_neuron) = exp(-neuron.delta / neuron.tau_out) # output decay
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # module end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user