module types

export
    # struct
    Ironpen, model, knowledgeFn, lifNeuron, alifNeuron, linearNeuron,
    kfn_1, inputNeuron, computeNeuron, neuron, outputNeuron, passthroughNeuron,

    # function
    instantiate_custom_types, init_neuron, populate_neuron,
    add_neuron!

using Random, Flux, LinearAlgebra

#------------------------------------------------------------------------------------------------100

abstract type Ironpen end
abstract type knowledgeFn <: Ironpen end
abstract type neuron <: Ironpen end
abstract type inputNeuron <: neuron end
abstract type outputNeuron <: neuron end
abstract type computeNeuron <: neuron end

#------------------------------------------------------------------------------------------------100
""" Model struct
|
||
"""
|
||
Base.@kwdef mutable struct model <: Ironpen
|
||
knowledgeFn::Union{Dict,Nothing} = nothing
|
||
modelParams::Union{Dict,Nothing} = nothing
|
||
error::Float64 = 0.0
|
||
outputError::Array{Float64} = Float64[]
|
||
|
||
""" "inference" = no learning params will be collected.
|
||
"learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
|
||
correct answer is available then merge Δw_rec_change into wRecChange then
|
||
reset epsilon_j.
|
||
"reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
|
||
learningStage::String = "inference"
|
||
timeStep::Number = 0.0
|
||
end
|
||
""" Model outer constructor
|
||
|
||
# Example
|
||
I_kfnparams = Dict(
|
||
:type => "lifNeuron",
|
||
:v_t1 => 0.0, # neuron membrane potential at time = t+1
|
||
:v_th => 2.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
|
||
:z_t => false, # neuron firing status at time = t
|
||
:z_t1 => false, # neuron firing status at time = t+1
|
||
:gammaPd => 0.3, # discount factor. The value is from the paper
|
||
:phi => 0.0, # psuedo derivative
|
||
:refractoryDuration => 2.0, # neuron refractory period in tick
|
||
:delta => 1.0,
|
||
:tau_m => 20.0, # membrane time constant in millisecond. The value is from the paper
|
||
:eta => 0.01, # learning rate
|
||
|
||
I_kfn = Ironpen_ai_gpu.knowledgeFn(I_kfnparams, lif_neuron_params, alif_neuron_params,
|
||
linear_neuron_params)
|
||
|
||
modelParams_1 = Dict(:knowledgeFn => Dict(:I => I_kfn,
|
||
:run => run_kfn),
|
||
:learningStage => "doing_inference",)
|
||
|
||
model_1 = Ironpen_ai_gpu.model(modelParams_1)
|
||
"""
|
||
function model(params::Dict)
|
||
m = model()
|
||
m.modelParams = params
|
||
|
||
fields = fieldnames(typeof(m))
|
||
for i in fields
|
||
if i in keys(params)
|
||
m.:($i) = params[i] # assign params to n struct fields
|
||
end
|
||
end
|
||
|
||
return m
|
||
end
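
# A minimal usage sketch (hypothetical values): any key in `params` whose name matches
# a struct field is copied onto the new instance; everything else keeps its default.
#
#   m = model(Dict(:learningStage => "learning", :error => 0.0))
#   m.learningStage   # "learning"
#   m.timeStep        # 0.0 (default, key not supplied)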

#------------------------------------------------------------------------------------------------100

""" knowledgeFn struct
"""
Base.@kwdef mutable struct kfn_1 <: knowledgeFn
    knowledgeFnName::String = "not defined"
    kfnParams::Union{Dict,Nothing} = nothing # store params of the knowledgeFn itself for later use
    timeStep::Number = 0.0

    # Bn holds the error coefficients for both compute neurons and output neurons in one place
    Bn::Vector{Float64} = Float64[] # error projection coefficients from the kfn output's error to each neuron's error
    neuronsArray::Array{neuron} = neuron[] # neurons go here

    """ Output neurons go here. They are kept separate because
    1. their calculation is different from the other neuron types,
    2. other neuron types must not be induced to connect to output neurons, and
    3. output neurons are not induced to connect to their own type. """
    outputNeuronsArray::Array{outputNeuron} = outputNeuron[]

    """ "inference" = no learning params are collected.
    "learning" = the neuron accumulates epsilon_j, computes Δw_rec_change each time the
    correct answer is available, merges Δw_rec_change into wRecChange, then resets
    epsilon_j.
    "reflect" = the neuron merges wRecChange into wRec, then resets wRecChange. """
    learningStage::String = "inference"

    error::Float64 = 0.0

    firedNeurons::Array{Int64} = Int64[] # unique ids of firing neurons, used when randomly connecting neurons
    firedNeurons_t0::Union{Vector{Bool},Nothing} = nothing # firing state of all neurons at t0
    firedNeurons_t1::Union{Vector{Bool},Nothing} = nothing # firing state of all neurons at t1

    avgNeuronsFiringRate::Union{Float64,Nothing} = 0.0 # for displaying the average firing rate over all neurons
    avgNeurons_v_t1::Union{Float64,Nothing} = 0.0 # for displaying the average v_t1 over all neurons
    nExcitatory::Array{Int64} = Int64[] # list of excitatory neuron ids
    nInhabitory::Array{Int64} = Int64[] # list of inhibitory neuron ids
    nExInType::Array{Int64} = Int64[] # lists every neuron as EX or IN
    excitatoryPercent::Int64 = 60 # percentage of excitatory neurons; the inhibitory percentage is 100 - excitatoryPercent
end

#------------------------------------------------------------------------------------------------100

""" Knowledge function outer constructor >>> auto generate <<<

The constructor reads :knowledgeFnName, :computeNeuronNumber, :totalInputPort,
:totalNeurons, :inputPort, :computeNeuron and :outputPort (each port entry is a Dict
with :numbers and :params), plus :neuronFiringRateTarget, from kfnParams.

# Example

lif_neuron_params = Dict(
    :type => "lifNeuron",
    :v_th => 1.2, # neuron firing threshold (treated as an upper bound when auto generating)
    :z_t => false, # neuron firing status at time = t
    :gammaPd => 0.3, # discount factor. The value is from the paper
    :refractoryDuration => 2.0, # neuron refractory period in milliseconds
    :delta => 1.0,
    :tau_m => 5.0, # membrane time constant in milliseconds. It should equal the time used for one sequence
)

alif_neuron_params = Dict(
    :type => "alifNeuron",
    :v_th => 1.2, # neuron firing threshold (treated as an upper bound when auto generating)
    :z_t => false, # neuron firing status at time = t
    :gammaPd => 0.3, # discount factor. The value is from the paper
    :refractoryDuration => 2.0, # neuron refractory period in milliseconds
    :delta => 1.0,
    :tau_m => 5.0, # membrane time constant in milliseconds. It should equal the time used for one sequence

    # adaptation time constant in milliseconds. It should equal the total time the SNN
    # takes to perform a task, i.e. the episode length
    :tau_a => 10.0,
    :beta => 0.15, # constant.
    :a => 0.0,
)

linear_neuron_params = Dict(
    :type => "linearNeuron",
    :k => 0.9, # output leaking coefficient
    :tau_out => 5.0, # output time constant in milliseconds. It should equal the time used for one sequence
    :out => 0.0, # the neuron's output value is stored here
)

I_kfnparams = Dict(
    :knowledgeFnName => "I",
    :lif_neuron_number => 200,
    :alif_neuron_number => 100, # from the Allen Institute, ALIF is 40% of LIF
    :linear_neuron_number => 5, # output neurons; this is also the output length
    :Bn => "random", # error projection coefficients from the kfn output's error to each neuron's error
    :learning_rate => 0.01,
    :neuron_connection_pattern => "100%", # how many other neurons in knowledgeFn.neuronsArray each neuron subscribes to
    :output_neuron_connection_pattern => "100%", # "60%" of kfn.neuronsArray or a number
    :maximum_input_data_length => 5, # in the case of GloVe word encoding, it is 300
    :neuron_w_in_generation_pattern => "random", # a number or "random"
    :neuron_w_rec_generation_pattern => "random",
    :neuron_v_t_default => 0.5,
    :neuron_voltage_drop_percentage => "100%",
    :neuronFiringRateTarget => 50.0,
    :neuron_learning_rate => 0.01,
    :neuron_c_reg => 0.0001,
    :neuron_c_reg_v => 0.0001,
    :neuron_optimiser => "ADAM",
    :meta_params => Dict(:is_first_cycle => true,
                         :launch_time => 0.0,))

kfn1 = kfn_1(I_kfnparams)
"""
function kfn_1(kfnParams::Dict)

    kfn = kfn_1()
    kfn.kfnParams = kfnParams
    kfn.knowledgeFnName = kfn.kfnParams[:knowledgeFnName]

    if kfn.kfnParams[:computeNeuronNumber] < kfn.kfnParams[:totalInputPort]
        error("the number of compute neurons must be at least the number of input ports")
    end

    # # Bn
    # if kfn.kfnParams[:Bn] == "random"
    #     kfn.Bn = [Random.rand(0:0.001:1) for i in 1:kfn.kfnParams[:computeNeuronNumber]]
    # else # in case I want to specify manually
    #     kfn.Bn = [kfn.kfnParams[:Bn] for i in 1:kfn.kfnParams[:computeNeuronNumber]]
    # end

    # assign each neuron an ID equal to its position in kfn.neuronsArray because that
    # is the most straightforward scheme

    # add input ports; they must be added before any other neuron type
    for (k, current_type) in kfn.kfnParams[:inputPort]
        for i = 1:current_type[:numbers]
            n_id = length(kfn.neuronsArray) + 1
            n = init_neuron(n_id, current_type[:params], kfn.kfnParams)
            push!(kfn.neuronsArray, n)
        end
    end

    # add compute neurons
    for (k, current_type) in kfn.kfnParams[:computeNeuron]
        for i = 1:current_type[:numbers]
            n_id = length(kfn.neuronsArray) + 1
            n = init_neuron(n_id, current_type[:params], kfn.kfnParams)
            push!(kfn.neuronsArray, n)
        end
    end

    # add output neurons; they live in their own array, so their ids restart at 1
    for i = 1:kfn.kfnParams[:outputPort][:numbers]
        n = init_neuron(i, kfn.kfnParams[:outputPort][:params], kfn.kfnParams)
        push!(kfn.outputNeuronsArray, n)
    end

    for n in kfn.neuronsArray
        if typeof(n) <: computeNeuron
            n.firingRateTarget = kfn.kfnParams[:neuronFiringRateTarget]
        end
    end

    # excitatory to inhibitory neurons = 60:40 % of the compute neurons
    ex_number = Int(floor((kfn.excitatoryPercent / 100.0) * kfn.kfnParams[:computeNeuronNumber]))
    ex_n = [1 for i in 1:ex_number]
    in_number = kfn.kfnParams[:computeNeuronNumber] - ex_number
    in_n = [-1 for i in 1:in_number]
    ex_in = shuffle!([ex_n; in_n])
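
    # e.g. with computeNeuronNumber = 10 and excitatoryPercent = 60:
    # ex_number = 6, in_number = 4, so ex_in is a shuffled vector such as
    # [1, -1, 1, 1, -1, 1, -1, 1, -1, 1]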

    # input neurons are always excitatory (they keep their default ExInType = 1);
    # compute neurons are randomly assigned excitatory or inhibitory
    for n in kfn.neuronsArray
        if typeof(n) <: computeNeuron
            n.ExInType = pop!(ex_in)
        end
    end

    # fold each subscription's ExInType into the computeNeuron's incoming weights
    for n in kfn.neuronsArray
        hasproperty(n, :subscriptionList) || continue # input neurons have no subscriptionList
        for (i, sub_id) in enumerate(n.subscriptionList)
            n_ExInType = kfn.neuronsArray[sub_id].ExInType
            n.wRec[i] *= n_ExInType
            # record the id under its ex/in type on the kfn (note: an id is pushed
            # once per subscriber, so these lists can contain duplicates)
            if n_ExInType < 0
                push!(kfn.nInhabitory, sub_id)
            else
                push!(kfn.nExcitatory, sub_id)
            end
        end
    end
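
    # e.g. if neuron 7 is inhibitory (ExInType = -1), every subscriber's wRec entry
    # for neuron 7 is negated, so its spikes always reduce the membrane potential of
    # the neurons that listen to it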

    # fold each subscription's ExInType into the output neuron's incoming weights
    for n in kfn.outputNeuronsArray
        hasproperty(n, :subscriptionList) || continue
        for (i, sub_id) in enumerate(n.subscriptionList)
            n_ExInType = kfn.neuronsArray[sub_id].ExInType
            n.wRec[i] *= n_ExInType
        end
    end

    for n in kfn.neuronsArray
        push!(kfn.nExInType, n.ExInType)
    end

    return kfn
end

#------------------------------------------------------------------------------------------------100

""" passthroughNeuron struct
"""
Base.@kwdef mutable struct passthroughNeuron <: inputNeuron
    id::Int64 = 0 # ID of this neuron, i.e. its position in the knowledgeFn array
    type::String = "passthroughNeuron"
    knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
    z_t::Bool = false
    z_t1::Bool = false
    timeStep::Int64 = 0 # current time
    ExInType::Int64 = 1 # 1 excitatory, -1 inhibitory. An input neuron is always excitatory
end

function passthroughNeuron(params::Dict)
    n = passthroughNeuron()
    field_names = fieldnames(typeof(n))
    for i in field_names
        if i in keys(params)
            if i == :optimiser
                opt_type = string(split(params[i], ".")[end])
                n.:($i) = load_optimiser(opt_type)
            else
                n.:($i) = params[i] # assign matching params to struct fields
            end
        end
    end
    return n
end
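
# Usage sketch (hypothetical values): passthrough neurons only relay input firing, so
# they carry far fewer fields than compute neurons.
#
#   n = passthroughNeuron(Dict(:type => "passthroughNeuron", :z_t => false))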

#------------------------------------------------------------------------------------------------100

""" lifNeuron struct
"""
Base.@kwdef mutable struct lifNeuron <: computeNeuron
    id::Int64 = 0 # this neuron's ID, i.e. its position in the knowledgeFn
    type::String = "lifNeuron"
    ExInType::Int64 = 1 # 1 excitatory, -1 inhibitory
    knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
    subscriptionList::Array{Int64} = Int64[] # list of other neurons this neuron's synapses subscribe to
    timeStep::Int64 = 0 # current time
    wRec::Array{Float64} = Float64[] # synaptic weights (for receiving signals from other neurons)
    v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of the previous timestep
    v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at the current timestep
    v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
    vRest::Float64 = 0.0 # resting potential after the neuron fired
    z_t::Bool = false # zᵗ, neuron postsynaptic firing of the previous timestep
    # zᵗ⁺¹, neuron firing status at time = t+1. I need this because the forward function
    # of every neuron is evaluated timestep by timestep, and each neuron needs access to
    # the other neurons' firing status from the previous step while computing v_t1;
    # z_t1 holds the new value so that z_t is not overwritten mid-update
    z_t1::Bool = false # neuron postsynaptic firing at the current timestep (after the neuron's calculation)
    z_i_t::Array{Bool} = Bool[] # neuron presynaptic firing at the current timestep (i.e. the other neurons' postsynaptic firing of the previous timestep)
    z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
    synapticStrength::Array{Float64} = Float64[]
    synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))

    gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from the paper
    alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
    phi::Float64 = 0.0 # ϕ, pseudo derivative
    epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spikes
    decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
    eRec::Array{Float64} = Float64[] # eligibility trace for neuron spikes
    delta::Float64 = 1.0 # δ, discrete timestep size in milliseconds
    refractoryDuration::Int64 = 3 # neuron's refractory period in milliseconds
    refractoryCounter::Int64 = 0
    tau_m::Float64 = 0.0 # τ_m, membrane time constant in milliseconds
    eta::Float64 = 0.01 # η, learning rate
    wRecChange::Array{Float64} = Float64[] # Δw_rec, accumulated wRec change
    recSignal::Float64 = 0.0 # incoming recurrent signal
    alpha_v_t::Float64 = 0.0 # alpha * v_t
    error::Float64 = 0.0 # local neuron error
    # optimiser::Union{Any,Nothing} = load_optimiser("AdaBelief") # Flux optimiser

    firingCounter::Int64 = 0 # how many times the neuron has fired
    firingRateTarget::Float64 = 20.0 # neuron's target firing rate in Hz
    firingDiff::Float64 = 0.0 # e-prop supplement paper, equation 5
    firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
    firingRate::Float64 = 0.0 # running average of the firing rate in Hz

    """ "inference" = no learning params are collected.
    "learning" = the neuron accumulates epsilon_j, computes Δw_rec_change each time the
    correct answer is available, merges Δw_rec_change into wRecChange, then resets
    epsilon_j.
    "reflect" = the neuron merges wRecChange into wRec, then resets wRecChange. """
    learningStage::String = "inference"
end

""" lif neuron outer constructor

# Example

lif_neuron_params = Dict(
    :type => "lifNeuron",
    :v_th => 1.2, # neuron firing threshold (treated as an upper bound when auto generating)
    :z_t => false, # neuron firing status at time = t
    :gammaPd => 0.3, # discount factor. The value is from the paper
    :refractoryDuration => 2.0, # neuron refractory period in milliseconds
    :delta => 1.0,
    :tau_m => 5.0, # membrane time constant in milliseconds. It should equal the time used for one sequence
)

neuron1 = lifNeuron(lif_neuron_params)
"""
function lifNeuron(params::Dict)
    n = lifNeuron()
    field_names = fieldnames(typeof(n))
    for i in field_names
        if i in keys(params)
            if i == :optimiser
                opt_type = string(split(params[i], ".")[end])
                n.:($i) = load_optimiser(opt_type)
            else
                n.:($i) = params[i] # assign matching params to struct fields
            end
        end
    end
    return n
end

#------------------------------------------------------------------------------------------------100

""" alifNeuron struct
"""
Base.@kwdef mutable struct alifNeuron <: computeNeuron
    id::Int64 = 0 # this neuron's ID, i.e. its position in the knowledgeFn
    type::String = "alifNeuron"
    ExInType::Int64 = -1 # 1 excitatory, -1 inhibitory
    knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
    subscriptionList::Array{Int64} = Int64[] # list of other neurons this neuron's synapses subscribe to
    timeStep::Int64 = 0 # current time
    wRec::Array{Float64} = Float64[] # synaptic weights (for receiving signals from other neurons)
    v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of the previous timestep
    v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at the current timestep
    v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
    vRest::Float64 = 0.0 # resting potential after the neuron fired
    z_t::Bool = false # zᵗ, neuron postsynaptic firing of the previous timestep
    # zᵗ⁺¹, neuron firing status at time = t+1. I need this because the forward function
    # of every neuron is evaluated timestep by timestep, and each neuron needs access to
    # the other neurons' firing status from the previous step while computing v_t1;
    # z_t1 holds the new value so that z_t is not overwritten mid-update
    z_t1::Bool = false # neuron postsynaptic firing at the current timestep (after the neuron's calculation)
    z_i_t::Array{Bool} = Bool[] # neuron presynaptic firing at the current timestep (i.e. the other neurons' postsynaptic firing of the previous timestep)
    z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
    synapticStrength::Array{Float64} = Float64[]
    synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>0), upperlimit=(5=>5))

    alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
    delta::Float64 = 1.0 # δ, discrete timestep size in milliseconds
    epsilonRec::Array{Float64} = Float64[] # ϵ_rec(v), eligibility vector for neuron i's spikes
    epsilonRecA::Array{Float64} = Float64[] # ϵ_rec(a)
    decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
    eRec_v::Array{Float64} = Float64[] # component of the neuron's eligibility trace resulting from v_t
    eRec_a::Array{Float64} = Float64[] # component of the neuron's eligibility trace resulting from av_th
    eRec::Array{Float64} = Float64[] # neuron's eligibility trace
    eta::Float64 = 0.01 # η, learning rate
    gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from the paper
    phi::Float64 = 0.0 # ϕ, pseudo derivative
    refractoryDuration::Int64 = 3 # neuron's refractory period in milliseconds
    refractoryCounter::Int64 = 0
    tau_m::Float64 = 0.0 # τ_m, membrane time constant in milliseconds
    wRecChange::Array{Float64} = Float64[] # Δw_rec, accumulated wRec change
    recSignal::Float64 = 0.0 # incoming recurrent signal
    alpha_v_t::Float64 = 0.0 # alpha * v_t
    error::Float64 = 0.0 # local neuron error
    optimiser::Union{Any,Nothing} = load_optimiser("AdaBelief") # Flux optimiser

    firingCounter::Int64 = 0 # how many times the neuron has fired
    firingRateTarget::Float64 = 20.0 # neuron's target firing rate in Hz
    firingDiff::Float64 = 0.0 # e-prop supplement paper, equation 5
    firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
    firingRate::Float64 = 0.0 # running average of the firing rate in Hz

    tau_a::Float64 = 0.0 # τ_a, adaptation time constant in milliseconds
    beta::Float64 = 0.15 # β, constant, value from the paper
    rho::Float64 = 0.0 # ρ, threshold adaptation decay factor
    a::Float64 = 0.0 # threshold adaptation
    av_th::Float64 = 0.0 # adjusted neuron firing threshold

    """ "inference" = no learning params are collected.
    "learning" = the neuron accumulates epsilon_j, computes Δw_rec_change each time the
    correct answer is available, merges Δw_rec_change into wRecChange, then resets
    epsilon_j.
    "reflect" = the neuron merges wRecChange into wRec, then resets wRecChange. """
    learningStage::String = "inference"
end

""" alif neuron outer constructor

# Example

alif_neuron_params = Dict(
    :type => "alifNeuron",
    :v_th => 1.2, # neuron firing threshold (treated as an upper bound when auto generating)
    :z_t => false, # neuron firing status at time = t
    :gammaPd => 0.3, # discount factor. The value is from the paper
    :refractoryDuration => 2.0, # neuron refractory period in milliseconds
    :delta => 1.0,
    :tau_m => 5.0, # membrane time constant in milliseconds. It should equal the time used for one sequence

    # adaptation time constant in milliseconds. It should equal the total time the SNN
    # takes to perform a task, i.e. the episode length
    :tau_a => 10.0,
    :beta => 0.15, # constant.
    :a => 0.0,
)

neuron1 = alifNeuron(alif_neuron_params)
"""
function alifNeuron(params::Dict)
    n = alifNeuron()
    field_names = fieldnames(typeof(n))
    for i in field_names
        if i in keys(params)
            if i == :optimiser
                opt_type = string(split(params[i], ".")[end])
                n.:($i) = load_optimiser(opt_type)
            else
                n.:($i) = params[i] # assign matching params to struct fields
            end
        end
    end
    return n
end

#------------------------------------------------------------------------------------------------100

""" linearNeuron struct
"""
Base.@kwdef mutable struct linearNeuron <: outputNeuron
    id::Float64 = 0.0 # ID of this neuron, i.e. its position in the knowledgeFn array
    type::String = "linearNeuron"
    knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
    subscriptionList::Array{Int64} = Int64[] # list of other neurons this neuron's synapses subscribe to
    timeStep::Int64 = 0 # current time
    wRec::Array{Float64} = Float64[] # synaptic weights (for receiving signals from other neurons)
    v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of the previous timestep
    v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at the current timestep
    v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
    vRest::Float64 = 0.0 # resting potential after the neuron fired
    vError::Float64 = 0.0 # used to compute the model error
    z_t::Bool = false # zᵗ, neuron postsynaptic firing of the previous timestep
    # zᵗ⁺¹, neuron firing status at time = t+1. I need this because the forward function
    # of every neuron is evaluated timestep by timestep, and each neuron needs access to
    # the other neurons' firing status from the previous step while computing v_t1;
    # z_t1 holds the new value so that z_t is not overwritten mid-update
    z_t1::Bool = false # neuron postsynaptic firing at the current timestep (after the neuron's calculation)

    # neuron presynaptic firing at the current timestep (i.e. the other neurons'
    # postsynaptic firing of the previous timestep)
    z_i_t::Array{Bool} = Bool[]
    z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
    synapticStrength::Array{Float64} = Float64[]
    synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))

    gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from the paper
    alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
    phi::Float64 = 0.0 # ϕ, pseudo derivative
    epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spikes
    decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
    eRec::Array{Float64} = Float64[] # eligibility trace for neuron spikes
    delta::Float64 = 1.0 # δ, discrete timestep size in milliseconds
    refractoryDuration::Int64 = 3 # neuron's refractory period in milliseconds
    refractoryCounter::Int64 = 0
    tau_out::Float64 = 0.0 # τ_out, output time constant in milliseconds
    eta::Float64 = 0.01 # η, learning rate
    wRecChange::Array{Float64} = Float64[] # Δw_rec, accumulated wRec change
    recSignal::Float64 = 0.0 # incoming recurrent signal
    alpha_v_t::Float64 = 0.0 # alpha * v_t

    firingCounter::Int64 = 0 # how many times the neuron has fired
end

""" linear neuron outer constructor

# Example

linear_neuron_params = Dict(
    :type => "linearNeuron",
    :k => 0.9, # output leaking coefficient
    :tau_out => 5.0, # output time constant in milliseconds. It should equal the time used for one sequence
    :out => 0.0, # the neuron's output value is stored here
)

neuron1 = linearNeuron(linear_neuron_params)
"""
function linearNeuron(params::Dict)
    n = linearNeuron()
    field_names = fieldnames(typeof(n))
    for i in field_names
        if i in keys(params)
            if i == :optimiser
                opt_type = string(split(params[i], ".")[end])
                n.:($i) = load_optimiser(opt_type)
            else
                n.:($i) = params[i] # assign matching params to struct fields
            end
        end
    end

    return n
end

#------------------------------------------------------------------------------------------------100

# Note: the keyword was declared as Union{Dict,Nothing} but immediately overwritten
# with a tuple, which made it dead; it is now a Tuple and respected when supplied.
function load_optimiser(optimiser_name::String; params::Union{Tuple,Nothing} = nothing)
    if optimiser_name == "AdaBelief"
        params = something(params, (0.01, (0.9, 0.8))) # (η, (β1, β2))
        return Flux.Optimise.AdaBelief(params...)
    elseif optimiser_name == "AdaBelief2"
        # Output neurons need a slower pace of change, so η is lower (0.007) than for
        # compute neurons: if w_out changes too fast, the compute neurons cannot track
        # the direction the output neurons are moving, i.e. the two drift out of sync.
        params = something(params, (0.007, (0.9, 0.8)))
        return Flux.Optimise.AdaBelief(params...)
    else
        error("optimiser is not defined yet in load_optimiser()")
    end
end
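
# Usage sketch: `load_optimiser("AdaBelief")` builds a Flux AdaBelief optimiser with
# η = 0.01 and β = (0.9, 0.8); passing `params = (0.005, (0.9, 0.999))` overrides the
# defaults, and unknown names raise an error.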

function init_neuron!(id::Int64, n::passthroughNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]
end

# function init_neuron!(id::Int64, n::lifNeuron, kfnParams::Dict)
#     n.id = id
#     n.knowledgeFnName = kfnParams[:knowledgeFnName]
#     subscription_options = shuffle!([1:(kfnParams[:input_neuron_number]+kfnParams[:computeNeuronNumber])...])
#     if typeof(kfnParams[:synapticConnectionPercent]) == String
#         percent = parse(Int, kfnParams[:synapticConnectionPercent][1:end-1]) / 100
#         synapticConnectionPercent = floor(length(subscription_options) * percent)
#         n.subscriptionList = [pop!(subscription_options) for i = 1:synapticConnectionPercent]
#     end
#     filter!(x -> x != n.id, n.subscriptionList)
#     n.epsilonRec = zeros(length(n.subscriptionList))
#     n.wRec = Random.rand(length(n.subscriptionList))
#     n.wRecChange = zeros(length(n.subscriptionList))
#     n.reg_voltage_b = zeros(length(n.subscriptionList))
#     n.alpha = calculate_α(n)
# end

function init_neuron!(id::Int64, n::lifNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]
    subscription_options = shuffle!([1:kfnParams[:totalNeurons]...])
    subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
                                     kfnParams[:totalNeurons]))
    n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]

    # prevent subscription to itself by removing this neuron's id
    filter!(x -> x != n.id, n.subscriptionList)
    n.synapticStrength = rand(-5:0.01:-4, length(n.subscriptionList))

    n.epsilonRec = zeros(length(n.subscriptionList))
    n.wRec = rand(-0.2:0.01:0.2, length(n.subscriptionList))
    n.wRecChange = zeros(length(n.subscriptionList))
    n.alpha = calculate_α(n)
    n.z_i_t_commulative = zeros(length(n.subscriptionList))
end
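
# e.g. with totalNeurons = 300 and synapticConnectionPercent = 50, each new neuron
# subscribes to floor(0.5 * 300) = 150 randomly drawn neurons (minus itself if drawn)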

function init_neuron!(id::Int64, n::alifNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]
    subscription_options = shuffle!([1:kfnParams[:totalNeurons]...])
    subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
                                     kfnParams[:totalNeurons]))
    n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]

    # prevent subscription to itself by removing this neuron's id
    filter!(x -> x != n.id, n.subscriptionList)
    n.synapticStrength = rand(-5:0.01:-4, length(n.subscriptionList))

    n.epsilonRec = zeros(length(n.subscriptionList))
    n.wRec = rand(-0.2:0.01:0.2, length(n.subscriptionList))
    n.wRecChange = zeros(length(n.subscriptionList))

    # the more time has passed since the neuron was last activated, the more its
    # membrane potential has decayed
    n.alpha = calculate_α(n)
    n.rho = calculate_ρ(n)
    n.epsilonRecA = zeros(length(n.subscriptionList))
    n.z_i_t_commulative = zeros(length(n.subscriptionList))
end

function init_neuron!(id::Int64, n::linearNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]

    # output neurons subscribe only to compute neurons, so input-port ids are skipped
    # (note: subscription_numbers is computed from totalNeurons, so a high percentage
    # can request more subscriptions than there are compute neurons available)
    subscription_options = shuffle!([kfnParams[:totalInputPort]+1:kfnParams[:totalNeurons]...])
    subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
                                     kfnParams[:totalNeurons]))
    n.subscriptionList = [pop!(subscription_options) for i = 1:subscription_numbers]
    n.synapticStrength = rand(-5:0.01:-4, length(n.subscriptionList))

    n.epsilonRec = zeros(length(n.subscriptionList))
    n.wRec = rand(-0.2:0.01:0.2, length(n.subscriptionList))
    n.wRecChange = zeros(length(n.subscriptionList))
    n.alpha = calculate_k(n)
    n.z_i_t_commulative = zeros(length(n.subscriptionList))
end

""" Make a neuron intended for use with a knowledgeFn
"""
function init_neuron(id::Int64, n_params::Dict, kfnParams::Dict)
    n = instantiate_custom_types(n_params)
    init_neuron!(id, n, n_params, kfnParams)

    return n
end
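
# Usage sketch (hypothetical param dicts): build a lifNeuron with id 1 and wire it up.
#
#   n = init_neuron(1, Dict(:type => "lifNeuron", :synapticConnectionPercent => 50,
#                           :tau_m => 20.0, :delta => 1.0),
#                   Dict(:knowledgeFnName => "I", :totalNeurons => 300))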

""" This function instantiates an Ironpen type.

# Example

new_model = instantiate_custom_types(Dict(:type => "model"))
"""
function instantiate_custom_types(params::Dict)
    type = string(split(params[:type], ".")[end])

    if type == "model"
        return model()
    elseif type == "knowledgeFn"
        return kfn_1() # kfn_1 is the concrete knowledgeFn type
    elseif type == "passthroughNeuron"
        return passthroughNeuron(params)
    elseif type == "lifNeuron"
        return lifNeuron(params)
    elseif type == "alifNeuron"
        return alifNeuron(params)
    elseif type == "linearNeuron"
        return linearNeuron(params)
    else
        return nothing
    end
end

# The docstring below is commented out together with the function; a bare docstring
# would otherwise attach to the next definition (calculate_α).
# """ Add a new neuron into a knowledgeFn
#
# # Example
# add_neuron!(kfn.kfnParams[:lif_neuron_params], kfn)
# """
# function add_neuron!(neuron_Dict::Dict, kfn::knowledgeFn)
#     id = length(kfn.neuronsArray) + 1
#     neuron = init_neuron(id, neuron_Dict, kfn.kfnParams,
#                          totalNeurons = (length(kfn.neuronsArray) + 1))
#     push!(kfn.neuronsArray, neuron)

#     # Randomly select an output neuron to attach the new neuron to
#     add_n_output_n!(Random.rand(kfn.outputNeuronsArray), id)
# end

calculate_α(neuron::lifNeuron) = exp(-neuron.delta / neuron.tau_m)
calculate_α(neuron::alifNeuron) = exp(-neuron.delta / neuron.tau_m)
calculate_ρ(neuron::alifNeuron) = exp(-neuron.delta / neuron.tau_a)
calculate_k(neuron::linearNeuron) = exp(-neuron.delta / neuron.tau_out)
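
# These decay factors come from exact integration of leaky dynamics over one timestep:
# α = exp(-δ/τ). For example, δ = 1 ms and τ_m = 20 ms give α = exp(-0.05) ≈ 0.951,
# i.e. the membrane potential keeps about 95% of its value per tick.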

#------------------------------------------------------------------------------------------------100

end # module end