update version
This commit is contained in:
1
oldVersion/0.0.3/src/.vscode/settings.json
vendored
Normal file
1
oldVersion/0.0.3/src/.vscode/settings.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{}
|
||||
153
oldVersion/0.0.3/src/DB_services.jl
Normal file
153
oldVersion/0.0.3/src/DB_services.jl
Normal file
@@ -0,0 +1,153 @@
|
||||
module DB_services
|
||||
|
||||
""" version 0.2
|
||||
"""
|
||||
|
||||
using DataStructures: count
|
||||
export send_to_DB, data_prep_for_DB
|
||||
|
||||
using DataStructures
|
||||
using JSON3
|
||||
using Redis
|
||||
using Random
|
||||
using UUIDs
|
||||
|
||||
include("Utils.jl")
|
||||
using .Utils
|
||||
|
||||
"""
|
||||
Dummy iron_pen_ai for raw_data_db_service testing
|
||||
"""
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
|
||||
""" Prepare model data for sending to raw_data_db_service by flattening all hierarchy
|
||||
data structure inside model_data into 1-dept JSON3.
|
||||
This function output is flattened JSON3 data
|
||||
*** all parameter name that is going to Cassandra must not contain a capital letter ***
|
||||
"""
|
||||
function data_prep_for_DB(model_name::String, experiment_number::Int, episode_number::Int,
|
||||
time_stamp::Int, model_data::OrderedDict)::Array{OrderedDict, 1}
|
||||
|
||||
payload_template = OrderedDict{Any, Any}(
|
||||
:model_name => model_name,
|
||||
:knowledgeFn_name => "none",
|
||||
:experiment_number => experiment_number,
|
||||
:episode_number => episode_number,
|
||||
)
|
||||
payloads = []
|
||||
for (k, v) in model_data[:m][:knowledgeFn] # loop over each knowledgeFn
|
||||
payload = deepcopy(payload_template)
|
||||
payload[:knowledgeFn_name] = v[:knowledgefn_name]
|
||||
payload[:neurons_list] = []
|
||||
for (k1, v1) in v
|
||||
if k1 == :neurons_array || k1 == :output_neurons_array
|
||||
for (k2, v2) in v1 # loop over each neuron
|
||||
if k2 != :type # add the following additonal data into neuron's ODict data (already have its parameters in there)
|
||||
neuron = OrderedDict(v2) # v2 is still in JSON3 format but
|
||||
# to be able to add new value to
|
||||
# it, it needs to be in
|
||||
# OrderedDict format
|
||||
|
||||
# # add corresponding knowledgeFn to neuron OrderedDict
|
||||
# neuron[:knowledgefn_name] = v[:knowledgefn_name]
|
||||
|
||||
# add corresponding experiment_number to neuron OrderedDict
|
||||
neuron[:experiment_number] = experiment_number
|
||||
|
||||
# add corresponding episode_number to neuron OrderedDict
|
||||
neuron[:episode_number] = episode_number
|
||||
|
||||
# # add corresponding tick_number to neuron OrderedDict
|
||||
# neuron[:tick_number] = tick_number
|
||||
|
||||
""" add neuron name of itself to neuron OrderedDict
|
||||
since neurons in neurons_array and output_neurons_array has the
|
||||
same name (because its name derived from its position in the
|
||||
array it lives in). In order to store them in the same
|
||||
OrderedDict, I need to change their name so I prefix their name
|
||||
with their array name
|
||||
"""
|
||||
neuron[:neuron_name] = Symbol(string(k1) * "_" * string(k2))
|
||||
|
||||
neuron[:model_error] = model_data[:m][:model_error]
|
||||
|
||||
neuron[:knowledgefn_error] = model_data[:m][:knowledgeFn][k][:knowledgeFn_error]
|
||||
|
||||
neuron[:model_name] = model_name
|
||||
|
||||
# use as identifier durin debug
|
||||
# neuron[:random] = Random.rand(1:100)
|
||||
|
||||
push!(payload[:neurons_list], neuron)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
push!(payloads, payload)
|
||||
end
|
||||
return payloads
|
||||
end
|
||||
|
||||
function send_to_DB(model_name::String, experiment_number::Int, episode_number::Int,
|
||||
tick_number::Int, model_json_string::String, redis_server_ip::String,
|
||||
pub_channel::String, sub_channel::String)
|
||||
model_ordereddict = OrderedDict(JSON3.read(model_json_string))
|
||||
payloads = data_prep_for_DB(model_name, experiment_number, episode_number, tick_number,
|
||||
model_ordereddict)
|
||||
|
||||
for payload in payloads
|
||||
# ask raw data service whether it is ready
|
||||
# println("checking raw_data_db_service")
|
||||
ask = Dict(:sender => "ironpen_ai",
|
||||
:topic => "whois", # [uuid1(), "whois"] to get name of the receiver
|
||||
:topic_id => uuid1(),
|
||||
:responding_to => nothing, # receiver fills in the message uuid it is responding to
|
||||
:communication_channel => sub_channel, # a channel that sender wants receiver to send message to or "none" to get message at receiver's default respond channel
|
||||
:instruction => nothing,
|
||||
:payload => nothing,
|
||||
:isreturn => true)
|
||||
incoming_message = Utils.service_query(redis_server_ip, pub_channel, sub_channel, ask)
|
||||
# println("raw_data_db_service ok")
|
||||
if UUID(incoming_message[:responding_to]) == ask[:topic_id]
|
||||
message = Dict(:sender => "ironpen_ai",
|
||||
:topic => "process", # [uuid1(), "whois"] to get name of the receiver
|
||||
:topic_id => uuid1(),
|
||||
:responding_to => nothing, # receiver fills in the message uuid it is responding to
|
||||
:communication_channel => sub_channel, # a channel that sender wants receiver to send message to or "none" to get message at receiver's default respond channel
|
||||
:instruction => "insert",
|
||||
:payload => payload,
|
||||
:isreturn => false)
|
||||
|
||||
result = Utils.service_query(redis_server_ip, pub_channel, sub_channel, message)
|
||||
# println("published")
|
||||
else
|
||||
error("raw_data_db_service not respond")
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # module end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
122
oldVersion/0.0.3/src/Ironpen.jl
Normal file
122
oldVersion/0.0.3/src/Ironpen.jl
Normal file
@@ -0,0 +1,122 @@
|
||||
module Ironpen
|
||||
|
||||
export kfn_1, synapticConnStrength!
|
||||
|
||||
|
||||
""" Order by dependencies of each file. The 1st included file must not depend on any other
|
||||
files and each file can only depend on the file included before it.
|
||||
"""
|
||||
|
||||
include("types.jl")
|
||||
using .types # bring model into this module namespace (this module is a parent module)
|
||||
|
||||
include("snn_utils.jl")
|
||||
using .snn_utils
|
||||
|
||||
# include("Save_and_load.jl")
|
||||
# using .Save_and_load
|
||||
|
||||
# include("DB_services.jl")
|
||||
# using .DB_services
|
||||
|
||||
include("forward.jl")
|
||||
using .forward
|
||||
|
||||
include("learn.jl")
|
||||
using .learn
|
||||
|
||||
# include("readout.jl")
|
||||
# using .readout
|
||||
|
||||
# include("interface.jl")
|
||||
# using .interface
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" version 0.0.3
|
||||
Todo:
|
||||
[4] implement dormant connection
|
||||
[] using RL to control learning signal
|
||||
[] consider using Dates.now() instead of timestamp because time_stamp may overflow
|
||||
[5] training should include adjusting α, neuron membrane potential decay factor
|
||||
which defined by neuron.tau_m formula in type.jl
|
||||
|
||||
Change from version: 0.0.2
|
||||
- new learning method
|
||||
- use Flux.logitcrossentropy for overall error
|
||||
- remove ΔwRecChange that apply immediately during online learning
|
||||
- collect ΔwRecChange during online learning (0-784th) and merge with wRec at
|
||||
the end learning (1000th).
|
||||
- compute model error at the end learning. Model error times with 5 constant for
|
||||
higher learning impact than the error during online
|
||||
|
||||
All features
|
||||
- multidispatch + for loop as main compute method
|
||||
- hard connection constrain yes
|
||||
- normalize output yes
|
||||
- allow -w_rec yes
|
||||
- voltage drop when neuron fires voltage drop equals to vth
|
||||
- v_t decay during refractory
|
||||
duration exponantial decay
|
||||
- input data population encoding, each pixel data =>
|
||||
population encoding, ralative between pixel data
|
||||
- compute neuron weight init rand()
|
||||
- output neuron weight init randn()
|
||||
- each knowledgeFn should have its own noise generater
|
||||
- where to put pseudo derivative (n.phi)
|
||||
- add excitatory, inhabitory to neuron
|
||||
- implement "start learning", reset learning and "learning", "end_learning and
|
||||
"inference"
|
||||
- output neuron connect to random multiple compute neurons and overall have
|
||||
the same structure as lif
|
||||
- time-based learning method based on new error formula
|
||||
(use output vt compared to vth instead of late time)
|
||||
if output neuron not activate when it should, use output neuron's
|
||||
(vth - vt)*100/vth as error
|
||||
if output neuron activates when it should NOT, use output neuron's
|
||||
(vt*100)/vth as error
|
||||
- use LinearAlgebra.normalize!(vector, 1) to adjust weight after weight merge
|
||||
- reset_epsilonRec after ΔwRecChange is calculated
|
||||
- synaptic connection strength concept. use sigmoid, turn connection offline
|
||||
- wRec should not normalized whole. it should be local 5 conn normalized.
|
||||
- neuroplasticity() i.e. change connection
|
||||
- add multi threads
|
||||
- add maximum weight cap of each connection
|
||||
|
||||
|
||||
Removed features
|
||||
- Δweight * connection strength
|
||||
- weaker connection should be harder to increase strength. It requires a lot of
|
||||
repeat activation to get it stronger. While strong connction requires a lot of
|
||||
inactivation to get it weaker. The concept is strong connection will lock
|
||||
correct neural pathway through repeated use of the right connection i.e. keep training
|
||||
on the correct answer -> strengthen the right neural pathway (connections) ->
|
||||
this correct neural pathway resist to change.
|
||||
Not used connection should dissapear (forgetting).
|
||||
- during 0 training if 1-9 output neuron fires, adjust weight only those neurons
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # module end
|
||||
200
oldVersion/0.0.3/src/WPembeddings.jl
Normal file
200
oldVersion/0.0.3/src/WPembeddings.jl
Normal file
@@ -0,0 +1,200 @@
|
||||
"
|
||||
version 0.4
|
||||
Word and Positional embedding module
|
||||
"
|
||||
module WPembeddings
|
||||
|
||||
using Embeddings
|
||||
using JSON3
|
||||
using Redis
|
||||
|
||||
include("Utils.jl")
|
||||
|
||||
export get_word_embedding, get_positional_embedding, wp_embedding
|
||||
|
||||
|
||||
#----------------------------------------------------------------------------------------------
|
||||
# user setting for word embedding
|
||||
GloVe_embedding_filepath = "C:\\myWork\\my_projects\\AI\\NLP\\my_NLP\\glove.840B.300d.txt"
|
||||
max_GloVe_vocab_size = 0 # size 10000+ or "all"
|
||||
#----------------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# load GloVe word embedding. URL of the embedding file: https://nlp.stanford.edu/projects/glove/
|
||||
if max_GloVe_vocab_size == 0
|
||||
# don't load vocab
|
||||
elseif max_GloVe_vocab_size != "all"
|
||||
@time const embtable = Embeddings.load_embeddings(GloVe{:en}, GloVe_embedding_filepath,
|
||||
max_vocab_size=max_GloVe_vocab_size) # size 10000 or something
|
||||
const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
|
||||
else
|
||||
@time const embtable = Embeddings.load_embeddings(GloVe{:en}, GloVe_embedding_filepath)
|
||||
const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
|
||||
end
|
||||
|
||||
|
||||
# if max_GloVe_vocab_size != "all"
|
||||
# @time const embtable = Embeddings.load_embeddings(GloVe{:en}, GloVe_embedding_filepath,
|
||||
# max_vocab_size=max_GloVe_vocab_size) # size 10000 or something
|
||||
# const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
|
||||
# elseif max_GloVe_vocab_size == 0
|
||||
# else
|
||||
# @time const embtable = Embeddings.load_embeddings(GloVe{:en}, GloVe_embedding_filepath)
|
||||
# const get_word_index = Dict(word=>ii for (ii,word) in enumerate(embtable.vocab))
|
||||
# end
|
||||
|
||||
|
||||
"""
|
||||
get_word_embedding(word::String)
|
||||
|
||||
Get embedding vector of a word. Its dimention is depend on GloVe file used
|
||||
|
||||
# Example
|
||||
|
||||
we_matrix = get_word_embedding("blue")
|
||||
"""
|
||||
function get_word_embedding(word::String)
|
||||
index = get_word_index[word]
|
||||
embedding = embtable.embeddings[:,index]
|
||||
return embedding
|
||||
end
|
||||
|
||||
|
||||
"""
|
||||
get_positional_embedding(total_word_position::Integer, word_embedding_dimension::Integer=300)
|
||||
|
||||
return positional embedding matrix of size [word_embedding_dimension * total_word_position]
|
||||
|
||||
# Example
|
||||
|
||||
pe_matrix = get_positional_embedding(length(content), 300)
|
||||
"""
|
||||
function get_positional_embedding(total_word_position::Integer, word_embedding_dimension::Integer=300)
|
||||
d = word_embedding_dimension
|
||||
p = total_word_position
|
||||
pe = [x = i%2 == 0 ? cos(j/(10^(2i/d))) : sin(j/(10^(2i/d))) for i = 1:d, j = 1:p]
|
||||
return pe
|
||||
|
||||
end
|
||||
|
||||
|
||||
"""
|
||||
wp_embedding(tokenized_word::Array{String}, positional_embedding::Bool=false)
|
||||
|
||||
Word embedding with positional embedding.
|
||||
tokenized_word = sentense's tokenized word (not sentense in English definition but BERT definition.
|
||||
1-BERT sentense can be 20+ English's sentense)
|
||||
|
||||
# Example
|
||||
|
||||
|
||||
"""
|
||||
function wp_embedding(tokenized_word::Array{String}, positional_embedding::Bool=false)
|
||||
we_matrix = 0
|
||||
for (i, v) in enumerate(tokenized_word)
|
||||
if i == 1
|
||||
we_matrix = get_word_embedding(v)
|
||||
else
|
||||
we_matrix = hcat(we_matrix, get_word_embedding(v))
|
||||
end
|
||||
end
|
||||
|
||||
if positional_embedding
|
||||
pe_matrix = get_positional_embedding(length(tokenized_word), 300) # positional embedding
|
||||
wp_matrix = we_matrix + pe_matrix
|
||||
|
||||
return wp_matrix
|
||||
else
|
||||
return we_matrix
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
"""
|
||||
wp_query(tokenized_word::Array{String}, positional_embedding::Bool=false)
|
||||
|
||||
convert tokenized_word into JSON3 String to be sent to GloVe docker server
|
||||
"""
|
||||
function wp_query_send(tokenized_word::Array{String}, positional_embedding::Bool=false)
|
||||
d = Dict("tokenized_word"=> tokenized_word, "positional_embedding"=>positional_embedding)
|
||||
json3_str = JSON3.write(d)
|
||||
return json3_str
|
||||
end
|
||||
|
||||
|
||||
"""
|
||||
wp_query(tokenized_word::Array{String}, positional_embedding::Bool=false)
|
||||
|
||||
Using inside word_embedding_server to receive word embedding job
|
||||
convert JSON3 String into tokenized_word and positional_embedding
|
||||
"""
|
||||
function wp_query_receive(json3_str::String)
|
||||
d = JSON3.read(json3_str)
|
||||
tokenized_word = Array(d.tokenized_word)
|
||||
positional_embedding = d.positional_embedding
|
||||
|
||||
return tokenized_word, positional_embedding
|
||||
end
|
||||
|
||||
|
||||
"""
|
||||
Send tokenized_word to word_embedding_server and return word embedding
|
||||
|
||||
# Example
|
||||
|
||||
WPembeddings.query_wp_server(tokenized_word)
|
||||
"""
|
||||
function query_wp_server(query;
|
||||
host="0.0.0.0",
|
||||
port=6379,
|
||||
publish_channel="word_embedding_server/input",
|
||||
positional_encoding=true)
|
||||
|
||||
# channel used to receive JSON String from word_embedding_server
|
||||
wp_channel = Channel(10)
|
||||
function wp_receive(x)
|
||||
array = Utils.JSON3_str_to_Array(x)
|
||||
put!(wp_channel, array)
|
||||
end
|
||||
|
||||
# establish connection to word_embedding_server using default port
|
||||
conn = Redis.RedisConnection(host=host, port=port)
|
||||
sub = Redis.open_subscription(conn)
|
||||
Redis.subscribe(sub, "word_embedding_server/output", wp_receive)
|
||||
# Redis.subscribe(sub, "word_embedding_server/output", WPembeddings.wp_receive)
|
||||
|
||||
# set positional_encoding = true to enable positional encoding
|
||||
query = WPembeddings.wp_query_send(query, positional_encoding)
|
||||
# Ask word_embedding_server for word embedding
|
||||
Redis.publish(conn, publish_channel, query);
|
||||
wait(wp_channel) # wait for word_embedding_server to response
|
||||
embedded_word = take!(wp_channel)
|
||||
|
||||
disconnect(conn)
|
||||
return embedded_word
|
||||
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end
|
||||
293
oldVersion/0.0.3/src/forward.jl
Normal file
293
oldVersion/0.0.3/src/forward.jl
Normal file
@@ -0,0 +1,293 @@
|
||||
module forward
|
||||
|
||||
using Statistics, Random, LinearAlgebra, JSON3
|
||||
using GeneralUtils
|
||||
using ..types, ..snn_utils
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" Model forward()
|
||||
"""
|
||||
function (m::model)(input_data::AbstractVector)
|
||||
m.timeStep += 1
|
||||
|
||||
# process all corresponding KFN
|
||||
# raw_model_respond, outputNeuron_v_t1, firedNeurons_t1 = m.knowledgeFn[:I](m, input_data)
|
||||
|
||||
# the 2nd return (KFN error) should not be used as model error but I use it because there is
|
||||
# only one KFN in a model right now
|
||||
return m.knowledgeFn[:I](m, input_data)
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" knowledgeFn forward()
|
||||
"""
|
||||
|
||||
function (kfn::kfn_1)(m::model, input_data::AbstractVector)
|
||||
kfn.timeStep = m.timeStep
|
||||
|
||||
kfn.learningStage = m.learningStage
|
||||
|
||||
if kfn.learningStage == "start_learning"
|
||||
# reset params here instead of at the end_learning so that neuron's parameter data
|
||||
# don't gets wiped and can be logged for visualization later
|
||||
for n in kfn.neuronsArray
|
||||
# epsilonRec need to be reset because it counting how many each synaptic fires and
|
||||
# use this info to calculate how much synaptic weight should be adjust
|
||||
resetLearningParams!(n)
|
||||
end
|
||||
|
||||
for n in kfn.outputNeuronsArray
|
||||
# epsilonRec need to be reset because it counting how many each synaptic fires and
|
||||
# use this info to calculate how much synaptic weight should be adjust
|
||||
resetLearningParams!(n)
|
||||
end
|
||||
|
||||
# clear variables
|
||||
kfn.firedNeurons = Int64[]
|
||||
kfn.firedNeurons_t0 = Bool[]
|
||||
kfn.firedNeurons_t1 = Bool[]
|
||||
|
||||
kfn.learningStage = "learning"
|
||||
m.learningStage = kfn.learningStage
|
||||
end
|
||||
|
||||
# generate noise
|
||||
noise = [GeneralUtils.randomChoiceWithProb([true, false],[0.5,0.5])
|
||||
for i in 1:length(input_data)]
|
||||
# noise = [rand(rng, Distributions.Binomial(1, 0.5)) for i in 1:10] # another option
|
||||
|
||||
input_data = [noise; input_data] # noise must start from neuron id 1
|
||||
|
||||
for n in kfn.neuronsArray
|
||||
timestep_forward!(n)
|
||||
end
|
||||
for n in kfn.outputNeuronsArray
|
||||
timestep_forward!(n)
|
||||
end
|
||||
|
||||
# pass input_data into input neuron.
|
||||
# number of data point equals to number of input neuron starting from id 1
|
||||
for (i, data) in enumerate(input_data)
|
||||
kfn.neuronsArray[i].z_t1 = data
|
||||
end
|
||||
|
||||
kfn.firedNeurons_t0 = [n.z_t for n in kfn.neuronsArray] #TODO check if it is used?
|
||||
|
||||
# Threads.@threads for n in kfn.neuronsArray
|
||||
for n in kfn.neuronsArray
|
||||
n(kfn)
|
||||
end
|
||||
|
||||
kfn.firedNeurons_t1 = [n.z_t1 for n in kfn.neuronsArray]
|
||||
append!(kfn.firedNeurons, findall(kfn.firedNeurons_t1)) # store id of neuron that fires
|
||||
kfn.firedNeurons |> unique! # use for random new neuron connection
|
||||
|
||||
# Threads.@threads for n in kfn.outputNeuronsArray
|
||||
for n in kfn.outputNeuronsArray
|
||||
n(kfn)
|
||||
end
|
||||
|
||||
out = [n.z_t1 for n in kfn.outputNeuronsArray]
|
||||
outputNeuron_v_t1 = [n.v_t1 for n in kfn.outputNeuronsArray]
|
||||
|
||||
return out::Array{Bool}, outputNeuron_v_t1::Array{Float64}, sum(kfn.firedNeurons_t1),
|
||||
kfn.ExInSignalSum
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" passthroughNeuron forward()
|
||||
"""
|
||||
function (n::passthroughNeuron)(kfn::knowledgeFn)
|
||||
n.timeStep = kfn.timeStep
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" lifNeuron forward()
|
||||
"""
|
||||
function (n::lifNeuron)(kfn::knowledgeFn)
|
||||
n.timeStep = kfn.timeStep
|
||||
|
||||
# pulling other neuron's firing status at time t
|
||||
n.z_i_t = getindex(kfn.firedNeurons_t0, n.subscriptionList)
|
||||
n.z_i_t_commulative += n.z_i_t
|
||||
|
||||
if n.refractoryCounter != 0
|
||||
n.refractoryCounter -= 1
|
||||
|
||||
# neuron is in refractory state, skip all calculation
|
||||
n.z_t1 = false # used by timestep_forward() in kfn. Set to zero because neuron spike
|
||||
# last only 1 timestep follow by a period of refractory.
|
||||
n.recSignal = n.recSignal * 0.0
|
||||
|
||||
# decay of v_t1
|
||||
n.v_t1 = n.alpha * n.v_t
|
||||
else
|
||||
n.recSignal = sum(n.wRec .* n.z_i_t) # signal from other neuron that this neuron subscribed
|
||||
n.alpha_v_t = n.alpha * n.v_t
|
||||
n.v_t1 = n.alpha_v_t + n.recSignal
|
||||
n.v_t1 = no_negative!(n.v_t1)
|
||||
|
||||
if n.v_t1 > n.v_th
|
||||
n.z_t1 = true
|
||||
n.refractoryCounter = n.refractoryDuration
|
||||
n.firingCounter += 1
|
||||
n.v_t1 = n.vRest
|
||||
else
|
||||
n.z_t1 = false
|
||||
end
|
||||
|
||||
# there is a difference from alif formula
|
||||
n.phi = (n.gammaPd / n.v_th) * max(0, 1 - (n.v_t1 - n.v_th) / n.v_th)
|
||||
n.decayedEpsilonRec = n.alpha * n.epsilonRec
|
||||
n.epsilonRec = n.decayedEpsilonRec + n.z_i_t
|
||||
end
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" alifNeuron forward()
|
||||
"""
|
||||
function (n::alifNeuron)(kfn::knowledgeFn)
|
||||
n.timeStep = kfn.timeStep
|
||||
|
||||
n.z_i_t = getindex(kfn.firedNeurons_t0, n.subscriptionList)
|
||||
n.z_i_t_commulative += n.z_i_t
|
||||
|
||||
if n.refractoryCounter != 0
|
||||
n.refractoryCounter -= 1
|
||||
|
||||
# neuron is in refractory state, skip all calculation
|
||||
n.z_t1 = false # used by timestep_forward() in kfn. Set to zero because neuron spike last only 1 timestep follow by a period of refractory.
|
||||
n.a = (n.rho * n.a) + ((1 - n.rho) * n.z_t)
|
||||
n.recSignal = n.recSignal * 0.0
|
||||
|
||||
# decay of v_t1
|
||||
n.v_t1 = n.alpha * n.v_t
|
||||
n.phi = 0
|
||||
else
|
||||
n.a = (n.rho * n.a) + ((1 - n.rho) * n.z_t)
|
||||
n.av_th = n.v_th + (n.beta * n.a)
|
||||
n.recSignal = sum(n.wRec .* n.z_i_t) # signal from other neuron that this neuron subscribed
|
||||
n.alpha_v_t = n.alpha * n.v_t
|
||||
n.v_t1 = n.alpha_v_t + n.recSignal
|
||||
n.v_t1 = no_negative!(n.v_t1)
|
||||
if n.v_t1 > n.av_th
|
||||
n.z_t1 = true
|
||||
n.refractoryCounter = n.refractoryDuration
|
||||
n.firingCounter += 1
|
||||
n.v_t1 = n.vRest
|
||||
else
|
||||
n.z_t1 = false
|
||||
end
|
||||
|
||||
# there is a difference from lif formula
|
||||
n.phi = (n.gammaPd / n.v_th) * max(0, 1 - (n.v_t1 - n.av_th) / n.v_th)
|
||||
n.decayedEpsilonRec = n.alpha * n.epsilonRec
|
||||
n.epsilonRec = n.decayedEpsilonRec + n.z_i_t
|
||||
n.epsilonRecA = (n.phi * n.epsilonRec) +
|
||||
((n.rho - (n.phi * n.beta)) * n.epsilonRecA)
|
||||
end
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" linearNeuron forward()
|
||||
In this implementation, each output neuron is fully connected to every lif and alif neuron.
|
||||
"""
|
||||
function (n::linearNeuron)(kfn::T) where T<:knowledgeFn
|
||||
n.timeStep = kfn.timeStep
|
||||
|
||||
# pulling other neuron's firing status at time t
|
||||
n.z_i_t = getindex(kfn.firedNeurons_t1, n.subscriptionList)
|
||||
n.z_i_t_commulative += n.z_i_t
|
||||
|
||||
if n.refractoryCounter != 0
|
||||
n.refractoryCounter -= 1
|
||||
|
||||
# neuron is in refractory state, skip all calculation
|
||||
n.z_t1 = false # used by timestep_forward() in kfn. Set to zero because neuron spike
|
||||
# last only 1 timestep follow by a period of refractory.
|
||||
n.recSignal = n.recSignal * 0.0
|
||||
|
||||
# decay of v_t1
|
||||
n.v_t1 = n.alpha * n.v_t
|
||||
n.vError = n.v_t1 # store voltage that will be used to calculate error later
|
||||
else
|
||||
recSignal = n.wRec .* n.z_i_t
|
||||
if n.id == 1 #FIXME debugging output neuron dead
|
||||
for i in recSignal
|
||||
# if i > 0
|
||||
# kfn.exSignalSum += i
|
||||
# elseif i < 0
|
||||
# kfn.inSignalsum += i
|
||||
# else
|
||||
# end
|
||||
kfn.ExInSignalSum += i
|
||||
end
|
||||
end
|
||||
n.recSignal = sum(recSignal) # signal from other neuron that this neuron subscribed
|
||||
n.alpha_v_t = n.alpha * n.v_t
|
||||
n.v_t1 = n.alpha_v_t + n.recSignal
|
||||
n.v_t1 = no_negative!(n.v_t1)
|
||||
n.vError = n.v_t1 # store voltage that will be used to calculate error later
|
||||
if n.v_t1 > n.v_th
|
||||
n.z_t1 = true
|
||||
n.refractoryCounter = n.refractoryDuration
|
||||
n.firingCounter += 1
|
||||
n.v_t1 = n.vRest
|
||||
else
|
||||
n.z_t1 = false
|
||||
end
|
||||
|
||||
# there is a difference from alif formula
|
||||
n.phi = (n.gammaPd / n.v_th) * max(0, 1 - (n.v_t1 - n.v_th) / n.v_th)
|
||||
n.decayedEpsilonRec = n.alpha * n.epsilonRec
|
||||
n.epsilonRec = n.decayedEpsilonRec + n.z_i_t
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # end module
|
||||
79
oldVersion/0.0.3/src/interface.jl
Normal file
79
oldVersion/0.0.3/src/interface.jl
Normal file
@@ -0,0 +1,79 @@
|
||||
module interface
|
||||
|
||||
|
||||
# export
|
||||
|
||||
# using
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end
|
||||
161
oldVersion/0.0.3/src/learn.jl
Normal file
161
oldVersion/0.0.3/src/learn.jl
Normal file
@@ -0,0 +1,161 @@
|
||||
module learn
|
||||
|
||||
using Statistics, Random, LinearAlgebra, JSON3, Flux
|
||||
using GeneralUtils
|
||||
using ..types, ..snn_utils
|
||||
|
||||
export learn!, compute_wRecChange!, computeModelError
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
function learn!(m::model)
|
||||
learn!(m.knowledgeFn[:I])
|
||||
end
|
||||
|
||||
""" knowledgeFn learn()
|
||||
"""
|
||||
function learn!(kfn::kfn_1)
|
||||
# compute kfn error for each neuron
|
||||
Threads.@threads for n in kfn.neuronsArray # multithread is not atomic and causing error
|
||||
# for n in kfn.neuronsArray
|
||||
learn!(n, kfn.firedNeurons, kfn.nExInType)
|
||||
end
|
||||
for n in kfn.outputNeuronsArray
|
||||
learn!(n, kfn.firedNeurons, kfn.nExInType, kfn.kfnParams[:totalInputPort])
|
||||
end
|
||||
|
||||
# wrap up learning session
|
||||
if kfn.learningStage == "end_learning"
|
||||
kfn.learningStage = "inference"
|
||||
end
|
||||
end
|
||||
|
||||
function computeModelError(modelRespond, correctAnswer; magnitude::Float64=1.0)
|
||||
if correctAnswer === nothing
|
||||
correctAnswer = BitArray(zeros(length(modelRespond)))
|
||||
else
|
||||
correctAnswer = Bool.(correctAnswer) # correct answer for kfn I
|
||||
end
|
||||
return Flux.logitcrossentropy(modelRespond, correctAnswer) .* magnitude
|
||||
end
|
||||
|
||||
function compute_wRecChange!(m::model, error::Float64)
|
||||
compute_wRecChange!(m.knowledgeFn[:I], error)
|
||||
end
|
||||
|
||||
function compute_wRecChange!(kfn::kfn_1, error::Float64)
|
||||
# compute kfn error for each neuron
|
||||
Threads.@threads for n in kfn.neuronsArray
|
||||
# for n in kfn.neuronsArray
|
||||
compute_wRecChange!(n, error)
|
||||
end
|
||||
for n in kfn.outputNeuronsArray
|
||||
compute_wRecChange!(n, error)
|
||||
end
|
||||
end
|
||||
|
||||
function compute_wRecChange!(n::passthroughNeuron, error::Float64)
|
||||
# skip
|
||||
end
|
||||
|
||||
function compute_wRecChange!(n::lifNeuron, error::Float64)
|
||||
n.eRec = n.phi * n.epsilonRec
|
||||
ΔwRecChange = -n.eta * error * n.eRec
|
||||
n.wRecChange .+= ΔwRecChange
|
||||
reset_epsilonRec!(n)
|
||||
end
|
||||
|
||||
function compute_wRecChange!(n::alifNeuron, error::Float64)
|
||||
n.eRec_v = n.phi * n.epsilonRec
|
||||
n.eRec_a = n.phi * n.beta * n.epsilonRecA
|
||||
n.eRec = n.eRec_v + n.eRec_a
|
||||
ΔwRecChange = -n.eta * error * n.eRec
|
||||
n.wRecChange .+= ΔwRecChange
|
||||
reset_epsilonRec!(n)
|
||||
reset_epsilonRecA!(n)
|
||||
end
|
||||
|
||||
function compute_wRecChange!(n::linearNeuron, error::Float64)
|
||||
n.eRec = n.phi * n.epsilonRec
|
||||
ΔwRecChange = -n.eta * error * n.eRec
|
||||
n.wRecChange .+= ΔwRecChange
|
||||
reset_epsilonRec!(n)
|
||||
end
|
||||
|
||||
function learn!(n::T, firedNeurons, nExInType) where T<:inputNeuron
|
||||
# skip
|
||||
end
|
||||
|
||||
function learn!(n::T, firedNeurons, nExInType) where T<:computeNeuron
|
||||
wSign_0 = sign.(n.wRec) # original sign
|
||||
n.wRec += n.wRecChange # merge wRecChange into wRec
|
||||
reset_wRecChange!(n)
|
||||
wSign_1 = sign.(n.wRec) # check for fliped sign, 1 indicates non-fliped sign
|
||||
nonFlipedSign = isequal.(wSign_0, wSign_1) # 1 not fliped, 0 fliped
|
||||
# normalize wRec peak to prevent input signal overwhelming neuron
|
||||
normalizePeak!(n.wRec, n.wRecChange, 2)
|
||||
# set weight that fliped sign to 0 for random new connection
|
||||
# n.wRec .*= nonFlipedSign
|
||||
capMaxWeight!(n.wRec) # cap maximum weight
|
||||
|
||||
synapticConnStrength!(n)
|
||||
neuroplasticity!(n, firedNeurons, nExInType)
|
||||
end
|
||||
|
||||
function learn!(n::T, firedNeurons, nExInType, totalInputPort) where T<:outputNeuron
|
||||
wSign_0 = sign.(n.wRec) # original sign
|
||||
n.wRec += n.wRecChange
|
||||
reset_wRecChange!(n)
|
||||
wSign_1 = sign.(n.wRec) # check for fliped sign, 1 indicates non-fliped sign
|
||||
nonFlipedSign = isequal.(wSign_0, wSign_1) # 1 not fliped, 0 fliped
|
||||
# normalize wRec peak to prevent input signal overwhelming neuron
|
||||
normalizePeak!(n.wRec, n.wRecChange, 2)
|
||||
# set weight that fliped sign to 0 for random new connection
|
||||
# n.wRec .*= nonFlipedSign
|
||||
capMaxWeight!(n.wRec) # cap maximum weight
|
||||
|
||||
# synapticConnStrength!(n) #CHANGE
|
||||
neuroplasticity!(n,firedNeurons, nExInType, totalInputPort)
|
||||
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # module end
|
||||
83
oldVersion/0.0.3/src/readout.jl
Normal file
83
oldVersion/0.0.3/src/readout.jl
Normal file
@@ -0,0 +1,83 @@
|
||||
module readout


using Flux.Optimise: apply!


using Statistics, Flux, Random, LinearAlgebra
using GeneralUtils
using ..types, ..readout, ..learn, ..forward


export readout!


#------------------------------------------------------------------------------------------------100


""" Run the readout window of a knowledge function.

Drives the RSNN with a fixed `readoutSignal` for `kfn.on_tauOut[1]` ticks,
accumulates the argmax position of the softmax response at each tick, and
returns the majority position together with `kfn.on_out_t0`.

When `correctAnswer` is given, the kfn error is computed each tick with
`Flux.logitcrossentropy` and `learn!(kfn)` is triggered; `correctAnswer=nothing`
runs pure inference.
"""
function readout!(kfn::knowledgeFn; correctAnswer=nothing) # correctAnswer=nothing use for inference
    # clear output to start reading
    # kfn.on_out_t0 *= 0.0 #FIXME should I clear it before RSNN readout?
    respondCount = zeros(length(kfn.on_out_t0))

    # prepare signal used to read RSNN: only first and last positions carry a 1
    readoutSignal = zeros(length(kfn.passthrough_zt0))
    readoutSignal[1] = 1
    readoutSignal[end] = 1

    lastKfnTimeStamp = kfn.timeStamp[1]
    for t in 1:kfn.on_tauOut[1]
        # println("t $t")
        tick = lastKfnTimeStamp + t
        # NOTE(review): looks like leftover debug output on the final tick —
        # confirm whether this blank-line print is still wanted.
        if t == kfn.on_tauOut[1]
            println("")
        end
        if kfn.learningStage[1] == 0 # RSNN is in inference mode, do not change marker
            # skip
            # NOTE(review): `marker` is never assigned on this path, so the kfn
            # call below raises UndefVarError in inference mode — verify the
            # intended inference marker value.
        else # RSNN is in learning mode, assign marker for commiting wChange at the end of readout window.
            marker = t == kfn.on_tauOut[1] ? 4 : kfn.learningStage[1]
        end

        # RSNN forward ----------
        singleTimeReadout, on_out_t0, softmaxRespond = kfn(readoutSignal, tick, marker,
            correctAnswer=correctAnswer)
        _, _, respondPosition = Utils.findMax(softmaxRespond)
        respondCount += respondPosition

        if correctAnswer !== nothing
            kfn.kfnError = [Flux.logitcrossentropy(on_out_t0, correctAnswer)]
            learn!(kfn)
        end
    end

    # majority vote over the readout window
    _, readout, _ = Utils.findMax(respondCount/kfn.on_tauOut[1])

    return readout, kfn.on_out_t0
end


end # module
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
455
oldVersion/0.0.3/src/snn_utils.jl
Normal file
455
oldVersion/0.0.3/src/snn_utils.jl
Normal file
@@ -0,0 +1,455 @@
|
||||
module snn_utils
|
||||
|
||||
export calculate_α, calculate_ρ, calculate_k, timestep_forward!, init_neuron, no_negative!,
|
||||
precision, calculate_w_change!, store_knowledgefn_error!, interneurons_adjustment!,
|
||||
reset_z_t!, resetLearningParams!, reset_learning_history_params!, reset_epsilonRec!,
|
||||
reset_epsilonRecA!, synapticConnStrength!, normalizePeak!, reset_wRecChange!,
|
||||
firing_rate_error!, firing_rate_regulator!, update_Bn!, cal_firing_reg!,
|
||||
neuroplasticity!, shakeup!, reset_learning_no_wchange!, adjust_internal_learning_rate!,
|
||||
gradient_withloss, capMaxWeight!, connStrengthAdjust
|
||||
|
||||
using Statistics, Random, LinearAlgebra, Distributions, Zygote, Flux
|
||||
using GeneralUtils
|
||||
using ..types
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
# Shift the buffered next-tick state (t+1 fields) into the current-tick fields.
# Passthrough neurons only carry a spike state.
timestep_forward!(n::passthroughNeuron) = n.z_t = n.z_t1

# Compute and output neurons additionally carry a membrane potential.
function timestep_forward!(n::Union{computeNeuron, outputNeuron})
    n.z_t = n.z_t1 # spike state
    n.v_t = n.v_t1 # membrane potential
end
|
||||
|
||||
# Clamp negative scalars to 0.0 (positive values pass through unchanged).
# NOTE: despite the `!`, this does not mutate — it returns the clamped value.
no_negative!(x) = ifelse(x < 0.0, 0.0, x)

# Coefficient of variation (in percent) of the per-group means.
# NOTE: shadows Base.precision inside this module.
function precision(x::Array{<:Array})
    groupmeans = mean.(x)
    return 100 * (std(groupmeans) / mean(groupmeans))
end
|
||||
|
||||
# Reset helpers for LIF/ALIF neurons. Each one restores a single piece of
# per-neuron state; vector-valued fields are zeroed via `* 0.0` (reallocating
# a same-shape zero array), nullable fields are left as `nothing`.
reset_last_firing_time!(n::computeNeuron) = n.lastFiringTime = 0.0
reset_refractory_state_active!(n::computeNeuron) = n.refractory_state_active = false
reset_v_t!(n::neuron) = n.v_t = n.vRest # membrane potential back to resting level
reset_z_t!(n::computeNeuron) = n.z_t = false
reset_epsilonRec!(n::computeNeuron) = n.epsilonRec = n.epsilonRec * 0.0
reset_epsilonRec!(n::outputNeuron) = n.epsilonRec = n.epsilonRec * 0.0
reset_epsilonRecA!(n::alifNeuron) = n.epsilonRecA = n.epsilonRecA * 0.0
reset_epsilon_in!(n::computeNeuron) = n.epsilon_in = isnothing(n.epsilon_in) ? nothing : n.epsilon_in * 0.0
reset_error!(n::Union{computeNeuron, outputNeuron}) = n.error = nothing
reset_w_in_change!(n::computeNeuron) = n.w_in_change = isnothing(n.w_in_change) ? nothing : n.w_in_change * 0.0
reset_wRecChange!(n::Union{computeNeuron, outputNeuron}) = n.wRecChange = n.wRecChange * 0.0
reset_a!(n::alifNeuron) = n.a = n.a * 0.0 # adaptation variable
reset_reg_voltage_a!(n::computeNeuron) = n.reg_voltage_a = n.reg_voltage_a * 0.0
reset_reg_voltage_b!(n::computeNeuron) = n.reg_voltage_b = n.reg_voltage_b * 0.0
reset_reg_voltage_error!(n::computeNeuron) = n.reg_voltage_error = n.reg_voltage_error * 0.0
reset_firing_counter!(n::Union{computeNeuron, outputNeuron}) = n.firingCounter = n.firingCounter * 0.0
reset_firing_diff!(n::Union{computeNeuron, outputNeuron}) = n.firingDiff = n.firingDiff * 0.0
reset_refractoryCounter!(n::Union{computeNeuron, outputNeuron}) = n.refractoryCounter = n.refractoryCounter * 0.0
reset_z_i_t_commulative!(n::Union{computeNeuron, outputNeuron}) = n.z_i_t_commulative = n.z_i_t_commulative * 0.0


# Reset helpers for the linear output neuron.
reset_epsilon_j!(n::linearNeuron) = n.epsilon_j = n.epsilon_j * 0.0
reset_out_t!(n::linearNeuron) = n.out_t = n.out_t * 0.0
reset_w_out_change!(n::linearNeuron) = n.w_out_change = n.w_out_change * 0.0
reset_b_change!(n::linearNeuron) = n.b_change = n.b_change * 0.0
|
||||
|
||||
|
||||
""" Reset a part of learning-related params that used to collect learning history during learning
|
||||
session
|
||||
"""
|
||||
# function reset_learning_no_wchange!(n::lifNeuron)
|
||||
# reset_epsilonRec!(n)
|
||||
# # reset_v_t!(n)
|
||||
# # reset_z_t!(n)
|
||||
# # reset_reg_voltage_a!(n)
|
||||
# # reset_reg_voltage_b!(n)
|
||||
# # reset_reg_voltage_error!(n)
|
||||
# reset_firing_counter!(n)
|
||||
# reset_firing_diff!(n)
|
||||
# reset_previous_error!(n)
|
||||
# reset_error!(n)
|
||||
|
||||
# # # reset refractory state at the end of episode. Otherwise once neuron goes into refractory state,
|
||||
# # # it will stay in refractory state forever
|
||||
# # reset_refractory_state_active!(n)
|
||||
# end
|
||||
# function reset_learning_no_wchange!(n::Union{alifNeuron, elif_neuron})
|
||||
# reset_epsilonRec!(n)
|
||||
# reset_epsilonRecA!(n)
|
||||
# reset_v_t!(n)
|
||||
# reset_z_t!(n)
|
||||
# # reset_a!(n)
|
||||
# reset_reg_voltage_a!(n)
|
||||
# reset_reg_voltage_b!(n)
|
||||
# reset_reg_voltage_error!(n)
|
||||
# reset_firing_counter!(n)
|
||||
# reset_firing_diff!(n)
|
||||
# reset_previous_error!(n)
|
||||
# reset_error!(n)
|
||||
|
||||
# # reset refractory state at the end of episode. Otherwise once neuron goes into refractory state,
|
||||
# # it will stay in refractory state forever
|
||||
# reset_refractory_state_active!(n)
|
||||
# end
|
||||
# function reset_learning_no_wchange!(n::linearNeuron)
|
||||
# reset_epsilon_j!(n)
|
||||
# reset_out_t!(n)
|
||||
# reset_error!(n)
|
||||
# end
|
||||
|
||||
""" Reset all learning-related params at the END of learning session
|
||||
"""
|
||||
function resetLearningParams!(n::lifNeuron)
|
||||
reset_epsilonRec!(n)
|
||||
reset_wRecChange!(n)
|
||||
# reset_v_t!(n)
|
||||
# reset_z_t!(n)
|
||||
reset_firing_counter!(n)
|
||||
reset_firing_diff!(n)
|
||||
|
||||
# reset refractory state at the start/end of episode. Otherwise once neuron goes into
|
||||
# refractory state, it will stay in refractory state forever
|
||||
# reset_refractoryCounter!(n)
|
||||
reset_z_i_t_commulative!(n)
|
||||
end
|
||||
function resetLearningParams!(n::alifNeuron)
|
||||
reset_epsilonRec!(n)
|
||||
reset_epsilonRecA!(n)
|
||||
reset_wRecChange!(n)
|
||||
# reset_v_t!(n)
|
||||
# reset_z_t!(n)
|
||||
# reset_a!(n)
|
||||
reset_firing_counter!(n)
|
||||
reset_firing_diff!(n)
|
||||
|
||||
# reset refractory state at the start/end of episode. Otherwise once neuron goes into
|
||||
# refractory state, it will stay in refractory state forever
|
||||
# reset_refractoryCounter!(n)
|
||||
reset_z_i_t_commulative!(n)
|
||||
end
|
||||
|
||||
# function reset_learning_no_wchange!(n::passthroughNeuron)
|
||||
# end
|
||||
|
||||
function resetLearningParams!(n::passthroughNeuron)
|
||||
# skip
|
||||
end
|
||||
|
||||
function resetLearningParams!(n::linearNeuron)
|
||||
reset_epsilonRec!(n)
|
||||
reset_wRecChange!(n)
|
||||
# reset_v_t!(n)
|
||||
reset_firing_counter!(n)
|
||||
|
||||
# reset refractory state at the start/end of episode. Otherwise once neuron goes into
|
||||
# refractory state, it will stay in refractory state forever
|
||||
# reset_refractoryCounter!(n)
|
||||
reset_z_i_t_commulative!(n)
|
||||
end
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" Append the current `kfn.knowledgeFn_error` into the rolling history
`kfn.recent_knowledgeFn_error`, a vector of per-session error vectors.

Behavior depends on `kfn.learningStage`:
- "start_learning": open a new inner vector (seeding it with the current error
  when one exists);
- "during_learning" / "end_learning": append the current error to the latest
  inner vector when available;
- anything else: raise an error.

Only the 3 most recent sessions are kept.

NOTE(review): this compares `kfn.learningStage` against whole strings, while
other code in this dump indexes it as `kfn.learningStage[1] == 0` — confirm
which representation this version of the struct actually uses.
"""
function store_knowledgefn_error!(kfn::knowledgeFn)
    # condition to adjust neuron in KFN plane in addition to weight adjustment inside each neuron
    if kfn.learningStage == "start_learning"
        if kfn.recent_knowledgeFn_error === nothing && kfn.knowledgeFn_error === nothing
            # nothing recorded yet and no error available: open an empty session
            kfn.recent_knowledgeFn_error = [[]]
        elseif kfn.recent_knowledgeFn_error === nothing
            # first session, seeded with the current error
            kfn.recent_knowledgeFn_error = [[kfn.knowledgeFn_error]]
        elseif kfn.recent_knowledgeFn_error !== nothing && kfn.knowledgeFn_error === nothing
            # new empty session
            push!(kfn.recent_knowledgeFn_error, [])
        else
            # new session seeded with the current error
            push!(kfn.recent_knowledgeFn_error, [kfn.knowledgeFn_error])
        end
    elseif kfn.learningStage == "during_learning"
        if kfn.knowledgeFn_error === nothing
            #skip
        else
            push!(kfn.recent_knowledgeFn_error[end], kfn.knowledgeFn_error)
        end
    elseif kfn.learningStage == "end_learning"
        if kfn.recent_knowledgeFn_error === nothing
            #skip
        else
            push!(kfn.recent_knowledgeFn_error[end], kfn.knowledgeFn_error)
        end
    else
        error("case does not defined yet")
    end

    # keep only the last 3 sessions
    if length(kfn.recent_knowledgeFn_error) > 3
        deleteat!(kfn.recent_knowledgeFn_error, 1)
    end
end
|
||||
|
||||
""" Update each compute neuron's error-projection coefficient `Bn` from the
summed output-weight changes of the output neurons, applying weight decay to
both `w_out` and `Bn`.

NOTE(review): if `kfn.outputNeuronsArray` is empty, `Δw` stays `nothing` and
the indexing `Δw[i]` below will throw — confirm this precondition is always
satisfied by construction.
"""
function update_Bn!(kfn::knowledgeFn)
    Δw = nothing
    for n in kfn.outputNeuronsArray
        # sum w_out_change over all output neurons (element-wise)
        Δw = Δw === nothing ? n.w_out_change : Δw + n.w_out_change
        n.w_out = n.w_out - (n.Bn_wout_decay * n.w_out) # w_out decay
    end
    # Δw = Δw / kfn.kfnParams[:linear_neuron_number] # average

    input_neuron_number = kfn.kfnParams[:input_neuron_number] # skip input neuron
    for i = 1:kfn.kfnParams[:compute_neuron_number]
        # compute neurons sit after the input neurons in neuronsArray
        n = kfn.neuronsArray[input_neuron_number+i]
        n.Bn = n.Bn + Δw[i]
        n.Bn = n.Bn - (n.Bn_wout_decay * n.Bn) # Bn decay, same rate as w_out
    end
end
|
||||
|
||||
""" Regulates membrane potential to stay under v_th, output is weight change
|
||||
"""
|
||||
function cal_v_reg!(n::lifNeuron)
|
||||
# retified linear function
|
||||
component_a1 = n.v_t1 - n.v_th < 0 ? 0 : (n.v_t1 - n.v_th)^2
|
||||
component_a2 = -n.v_t1 - n.v_th < 0 ? 0 : (-n.v_t1 - n.v_th)^2
|
||||
n.reg_voltage_a = n.reg_voltage_a + component_a1 + component_a2
|
||||
|
||||
component_b = n.v_t1 - n.v_th < 0 ? 0 : n.v_t1 - n.v_th
|
||||
#FIXME: not sure the following line is correct
|
||||
n.reg_voltage_b = n.reg_voltage_b + (component_b * n.epsilonRec)
|
||||
end
|
||||
|
||||
function cal_v_reg!(n::alifNeuron)
|
||||
# retified linear function
|
||||
component_a1 = n.v_t1 - n.av_th < 0 ? 0 : (n.v_t1 - n.av_th)^2
|
||||
component_a2 = -n.v_t1 - n.av_th < 0 ? 0 : (-n.v_t1 - n.av_th)^2
|
||||
n.reg_voltage_a = n.reg_voltage_a + component_a1 + component_a2
|
||||
|
||||
component_b = n.v_t1 - n.av_th < 0 ? 0 : n.v_t1 - n.av_th
|
||||
#FIXME: not sure the following line is correct
|
||||
n.reg_voltage_b = n.reg_voltage_b + (component_b * (n.epsilonRec - n.epsilonRecA))
|
||||
end
|
||||
|
||||
# Turn the accumulated voltage penalty into the neuron's voltage error term.
function voltage_error!(n::computeNeuron)
    n.reg_voltage_error = 0.5 * n.reg_voltage_a
    return n.reg_voltage_error
end


# Weight change from the voltage regularizer (scaled by learning rate and c_reg_v).
function voltage_regulator!(n::computeNeuron) # running average
    Δw = n.optimiser.eta * n.c_reg_v * n.reg_voltage_b
    return Δw
end


# Sum of squared firing-rate deviations over all compute neurons
# (input neurons at the front of neuronsArray are skipped).
function firingRateError(kfn::knowledgeFn)
    start_id = kfn.kfnParams[:input_neuron_number] + 1
    return 0.5 * sum([(n.firingDiff)^2 for n in kfn.neuronsArray[start_id:end]])
end


""" Weight change that pulls a neuron's firing rate down toward its target.
Only active when the rate is ABOVE target (the change is zeroed otherwise).

NOTE(review): this reads `n.eRec` while the rest of this file uses
`n.epsilonRec` — confirm `eRec` is a real field and not a typo.
"""
function firing_rate_regulator!(n::computeNeuron)
    # n.firingRate NOT running average (average over learning batch)
    Δw = n.optimiser.eta * n.c_reg *
        (n.firingRate - n.firingRateTarget) * n.eRec
    Δw = n.firingRate > n.firingRateTarget ? Δw : Δw * 0.0
    return Δw
end


# Firing rate in Hz, assuming timeStep counts milliseconds (hence * 1000).
firing_rate!(n::computeNeuron) = n.firingRate = (n.firingCounter / n.timeStep) * 1000
firing_diff!(n::computeNeuron) = n.firingDiff = n.firingRate - n.firingRateTarget


# Multiplicative learning-rate schedule: shrink when the last error delta was
# negative (improving), grow slightly otherwise.
function adjust_internal_learning_rate!(n::computeNeuron)
    n.internal_learning_rate = n.error_diff[end] < 0.0 ? n.internal_learning_rate * 0.99 :
        n.internal_learning_rate * 1.005
end
|
||||
|
||||
# Size of a synaptic-strength step: large for weak (very negative) connections,
# approaching zero as the sigmoid of the current strength saturates toward 1.
function connStrengthAdjust(currentStrength::Float64)
    return (1.0 - sigmoid(currentStrength))::Float64
end
|
||||
|
||||
""" Compute synaptic connection strength. bias will shift currentStrength to fit into
|
||||
sigmoid operating range which centred at 0 and range is -37 to 37.
|
||||
|
||||
# Example
|
||||
synaptic strength range is 0 to 10
|
||||
one may use bias = -5 to transform synaptic strength into range -5 to 5
|
||||
the return value is shifted back to original scale.
|
||||
|
||||
# Concept
|
||||
weaker connection should be harder to increase strength. It requires a lot of
|
||||
repeat activation to get it stronger. While strong connction requires a lot of
|
||||
inactivation to get it weaker. The concept is strong connection will lock
|
||||
correct neural pathway through repeated use of the right connection i.e. keep training
|
||||
on the correct answer -> strengthen the right neural pathway (connections) ->
|
||||
this correct neural pathway resist to change.
|
||||
Not used connection should dissapear (forgetting).
|
||||
"""
|
||||
function synapticConnStrength(currentStrength::Float64, updown::String)
|
||||
Δstrength = connStrengthAdjust(currentStrength)
|
||||
|
||||
if updown == "up"
|
||||
if currentStrength > 4 # strong connection
|
||||
updatedStrength = currentStrength + (Δstrength * 1.0)
|
||||
else
|
||||
updatedStrength = currentStrength + (Δstrength * 1.0)
|
||||
end
|
||||
elseif updown == "down"
|
||||
if currentStrength > 4
|
||||
updatedStrength = currentStrength - (Δstrength * 1.0)
|
||||
else
|
||||
updatedStrength = currentStrength - (Δstrength * 1.0)
|
||||
end
|
||||
else
|
||||
error("undefined condition line $(@__LINE__)")
|
||||
end
|
||||
return updatedStrength::Float64
|
||||
end
|
||||
|
||||
""" Compute all synaptic connection strength of a neuron. Also mark n.wRec to 0 if wRec goes
|
||||
below lowerlimit.
|
||||
"""
|
||||
function synapticConnStrength!(n::Union{computeNeuron, outputNeuron})
|
||||
for (i, connStrength) in enumerate(n.synapticStrength)
|
||||
# check whether connStrength increase or decrease based on usage from n.epsilonRec
|
||||
""" use n.z_i_t_commulative instead of the best choice, epsilonRec, here because ΔwRecChange
|
||||
calculation in learn!() will reset epsilonRec to zeroes vector in case where
|
||||
output neuron fires and trigger learn!() just before this synapticConnStrength
|
||||
calculation.
|
||||
Since n.z_i_t_commulative indicates whether a synaptic connection were used or not, it is
|
||||
ok to use. n.z_i_t_commulative also span across a training sample without resetting.
|
||||
"""
|
||||
updown = n.z_i_t_commulative[i] == 0 ? "down" : "up"
|
||||
updatedConnStrength = synapticConnStrength(connStrength, updown)
|
||||
updatedConnStrength = GeneralUtils.limitvalue(updatedConnStrength,
|
||||
n.synapticStrengthLimit.lowerlimit, n.synapticStrengthLimit.upperlimit)
|
||||
# at lowerlimit, mark wRec at this position to 0. for new random synaptic conn
|
||||
if updatedConnStrength == n.synapticStrengthLimit.lowerlimit[1]
|
||||
n.wRec[i] = 0.0
|
||||
end
|
||||
n.synapticStrength[i] = updatedConnStrength
|
||||
end
|
||||
end
|
||||
|
||||
function synapticConnStrength!(n::inputNeuron) end
|
||||
|
||||
""" normalize a part of a vector centering at a vector's maximum value along with nearby value
|
||||
within its radius. radius must be odd number.
|
||||
v1 will be normalized based on v2's peak
|
||||
"""
|
||||
function normalizePeak!(v1::Vector, v2::Vector, radius::Integer=2)
|
||||
peak = findall(isequal.(abs.(v2), maximum(abs.(v2))))[1]
|
||||
upindex = peak - radius
|
||||
upindex = upindex < 1 ? 1 : upindex
|
||||
downindex = peak + radius
|
||||
downindex = downindex > length(v1) ? length(v1) : downindex
|
||||
subvector = view(v1, upindex:downindex)
|
||||
normalize!(subvector, 1)
|
||||
end
|
||||
|
||||
""" rewire of neuron synaptic connection that has 0 weight. Without connection's excitatory and
|
||||
inhabitory ratio constraint.
|
||||
"""
|
||||
function neuroplasticity!(n::computeNeuron, firedNeurons::Vector,
|
||||
nExInTypeList::Vector)
|
||||
# if there is 0-weight then replace it with new connection
|
||||
zeroWeightConnIndex = findall(iszero.(n.wRec)) # connection that has 0 weight
|
||||
|
||||
# new synaptic connection must sample fron neuron that fires
|
||||
nFiredPool = filter(x -> x ∉ [n.id], firedNeurons) # exclude this neuron id from the id list
|
||||
filter!(x -> x ∉ n.subscriptionList, nFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
|
||||
nNonFiredPool = setdiff!([1:length(nExInTypeList)...], nFiredPool)
|
||||
filter!(x -> x ∉ [n.id], nNonFiredPool) # exclude this neuron id from the id list
|
||||
filter!(x -> x ∉ n.subscriptionList, nNonFiredPool) # exclude this neuron's subscriptionList from the list
|
||||
|
||||
w = rand(0.01:0.01:0.2, length(zeroWeightConnIndex))
|
||||
synapticStrength = rand(-5:0.01:-4, length(zeroWeightConnIndex))
|
||||
|
||||
shuffle!(nFiredPool)
|
||||
shuffle!(nNonFiredPool)
|
||||
|
||||
# add new synaptic connection to neuron
|
||||
for (i, connIndex) in enumerate(zeroWeightConnIndex)
|
||||
if length(nFiredPool) != 0
|
||||
newConn = popfirst!(nFiredPool)
|
||||
else
|
||||
newConn = popfirst!(nNonFiredPool)
|
||||
end
|
||||
|
||||
""" conn that is being replaced has to go into nNonFiredPool so nNonFiredPool isn't empty
|
||||
"""
|
||||
push!(nNonFiredPool, n.subscriptionList[connIndex])
|
||||
n.subscriptionList[connIndex] = newConn
|
||||
n.wRec[connIndex] = w[i] * nExInTypeList[newConn]
|
||||
n.synapticStrength[connIndex] = synapticStrength[i]
|
||||
end
|
||||
end
|
||||
|
||||
""" Rewire every zero-weight synaptic connection of an output neuron. Same
scheme as the computeNeuron method, but input neurons (ids `1:totalInputNeuron`)
are additionally excluded from both candidate pools, and an empty-pool
iteration is skipped instead of throwing.
"""
function neuroplasticity!(n::outputNeuron, firedNeurons::Vector,
    nExInTypeList::Vector, totalInputNeuron::Integer)
    # if there is 0-weight then replace it with new connection
    zeroWeightConnIndex = findall(iszero.(n.wRec)) # connection that has 0 weight

    # new synaptic connection must sample from neurons that fire
    nFiredPool = filter(x -> x ∉ [n.id], firedNeurons) # exclude this neuron id from the id list
    filter!(x -> x ∉ n.subscriptionList, nFiredPool) # exclude this neuron's subscriptionList from the list
    filter!(x -> x ∉ [1:totalInputNeuron...], nFiredPool) # exclude input neuron

    nNonFiredPool = setdiff!([1:length(nExInTypeList)...], nFiredPool)
    filter!(x -> x ∉ [n.id], nNonFiredPool) # exclude this neuron id from the id list
    filter!(x -> x ∉ n.subscriptionList, nNonFiredPool) # exclude this neuron's subscriptionList from the list
    filter!(x -> x ∉ [1:totalInputNeuron...], nNonFiredPool) # exclude input neuron

    w = rand(0.01:0.01:0.2, length(zeroWeightConnIndex)) # small initial weights
    synapticStrength = rand(-5:0.01:-4, length(zeroWeightConnIndex)) # near lower limit

    shuffle!(nFiredPool)
    shuffle!(nNonFiredPool)

    # add new synaptic connection to neuron
    for (i, connIndex) in enumerate(zeroWeightConnIndex)
        newConn::Int64 = 0 # 0 = sentinel for "no candidate available"
        if length(nFiredPool) != 0
            newConn = popfirst!(nFiredPool)
        elseif length(nNonFiredPool) != 0
            newConn = popfirst!(nNonFiredPool)
        else
            # skip
        end

        if newConn != 0
            """ conn that is being replaced has to go into nNonFiredPool so nNonFiredPool isn't empty
            """
            push!(nNonFiredPool, n.subscriptionList[connIndex])
            n.subscriptionList[connIndex] = newConn
            n.wRec[connIndex] = w[i] * nExInTypeList[newConn]
            n.synapticStrength[connIndex] = synapticStrength[i]
        end
    end
end
|
||||
|
||||
""" Cap maximum weight of each neuron connection
|
||||
"""
|
||||
function capMaxWeight!(v::Vector{Float64}, max=1.0)
|
||||
originalSign = sign.(v)
|
||||
v = originalSign .* GeneralUtils.replaceMoreThan.(abs.(v), max)
|
||||
end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # end module
|
||||
779
oldVersion/0.0.3/src/types.jl
Normal file
779
oldVersion/0.0.3/src/types.jl
Normal file
@@ -0,0 +1,779 @@
|
||||
module types
|
||||
|
||||
export
|
||||
# struct
|
||||
IronpenStruct, model, knowledgeFn, lifNeuron, alifNeuron, linearNeuron,
|
||||
kfn_1, inputNeuron, computeNeuron, neuron, outputNeuron, passthroughNeuron,
|
||||
|
||||
# function
|
||||
instantiate_custom_types, init_neuron, populate_neuron,
|
||||
add_neuron!
|
||||
|
||||
using Random, LinearAlgebra
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
# Type hierarchy of the model:
#   Ironpen                      root of all model types
#   ├── knowledgeFn              a trainable knowledge-function (RSNN) unit
#   └── neuron                   common supertype of all neuron kinds
#       ├── inputNeuron          feeds external signal in (no learning)
#       ├── outputNeuron         produces the kfn output
#       └── computeNeuron        recurrent LIF/ALIF compute units
abstract type Ironpen end
abstract type knowledgeFn <: Ironpen end
abstract type neuron <: Ironpen end
abstract type inputNeuron <: neuron end
abstract type outputNeuron <: neuron end
abstract type computeNeuron <: neuron end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" Model struct
|
||||
"""
|
||||
Base.@kwdef mutable struct model <: Ironpen
|
||||
knowledgeFn::Union{Dict,Nothing} = nothing
|
||||
modelParams::Union{Dict,Nothing} = nothing
|
||||
error::Float64 = 0.0
|
||||
outputError::Array{Float64} = Float64[]
|
||||
|
||||
""" "inference" = no learning params will be collected.
|
||||
"learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
|
||||
correct answer is available then merge Δw_rec_change into wRecChange then
|
||||
reset epsilon_j.
|
||||
"reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
|
||||
learningStage::String = "inference"
|
||||
timeStep::Number = 0.0
|
||||
end
|
||||
""" Model outer constructor
|
||||
|
||||
# Example
|
||||
I_kfnparams = Dict(
|
||||
:type => "lifNeuron",
|
||||
:v_t1 => 0.0, # neuron membrane potential at time = t+1
|
||||
:v_th => 2.0, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
|
||||
:z_t => false, # neuron firing status at time = t
|
||||
:z_t1 => false, # neuron firing status at time = t+1
|
||||
:gammaPd => 0.3, # discount factor. The value is from the paper
|
||||
:phi => 0.0, # psuedo derivative
|
||||
:refractoryDuration => 2.0, # neuron refractory period in tick
|
||||
:delta => 1.0,
|
||||
:tau_m => 20.0, # membrane time constant in millisecond. The value is from the paper
|
||||
:eta => 0.01, # learning rate
|
||||
|
||||
I_kfn = Ironpen_ai_gpu.knowledgeFn(I_kfnparams, lif_neuron_params, alif_neuron_params,
|
||||
linear_neuron_params)
|
||||
|
||||
modelParams_1 = Dict(:knowledgeFn => Dict(:I => I_kfn,
|
||||
:run => run_kfn),
|
||||
:learningStage => "doing_inference",)
|
||||
|
||||
model_1 = Ironpen_ai_gpu.model(modelParams_1)
|
||||
"""
|
||||
function model(params::Dict)
|
||||
m = model()
|
||||
m.modelParams = params
|
||||
|
||||
fields = fieldnames(typeof(m))
|
||||
for i in fields
|
||||
if i in keys(params)
|
||||
m.:($i) = params[i] # assign params to n struct fields
|
||||
end
|
||||
end
|
||||
|
||||
return m
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" knowledgeFn struct
|
||||
"""
|
||||
Base.@kwdef mutable struct kfn_1 <: knowledgeFn
|
||||
knowledgeFnName::String = "not defined"
|
||||
kfnParams::Union{Dict,Nothing} = nothing # store params of knowledgeFn itself for later use
|
||||
timeStep::Number = 0.0
|
||||
|
||||
# Bn contain error coefficient for both neurons and output neurons in one place
|
||||
Bn::Vector{Float64} = Float64[] # error projection coefficient from kfn output's error to each neurons's error
|
||||
neuronsArray::Array{neuron} = neuron[] # put neurons here
|
||||
|
||||
""" put output neuron here. I seperate output neuron because
|
||||
1. its calculation is difference than other neuron types
|
||||
2. other neuron type will not induced to connnect to output neuron
|
||||
3. output neuron does not induced to connect to its own type """
|
||||
outputNeuronsArray::Array{outputNeuron} = outputNeuron[]
|
||||
|
||||
""" "inference" = no learning params will be collected.
|
||||
"learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
|
||||
correct answer is available then merge Δw_rec_change into wRecChange then
|
||||
reset epsilon_j.
|
||||
"reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
|
||||
learningStage::String = "inference"
|
||||
|
||||
error::Float64 = 0.0
|
||||
|
||||
firedNeurons::Array{Int64} = Int64[] # store unique id of firing neurons to be used when random neuron connection
|
||||
firedNeurons_t0::Union{Vector{Bool},Nothing} = nothing # store firing state of all neurons at t0
|
||||
firedNeurons_t1::Union{Vector{Bool},Nothing} = nothing # store firing state of all neurons at t1
|
||||
|
||||
avgNeuronsFiringRate::Union{Float64,Nothing} = 0.0 # for displaying average firing rate over all neurons
|
||||
avgNeurons_v_t1::Union{Float64,Nothing} = 0.0 # for displaying average v_t1 over all neurons
|
||||
nExcitatory::Array{Int64} =Int64[] # list of excitatory neuron id
|
||||
nInhabitory::Array{Int64} = Int64[] # list of inhabitory neuron id
|
||||
nExInType::Array{Int64} = Int64[] # list all neuron EX or IN
|
||||
excitatoryPercent::Int64 = 60 # percentage of excitatory neuron, inhabitory percent will be 100-ExcitatoryPercent
|
||||
|
||||
ExInSignalSum = 0
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" Knowledge function outer constructor >>> auto generate <<<
|
||||
|
||||
# Example
|
||||
|
||||
lif_neuron_params = Dict(
|
||||
:type => "lifNeuron",
|
||||
:v_th => 1.2, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
|
||||
:z_t => false, # neuron firing status at time = t
|
||||
:gammaPd => 0.3, # discount factor. The value is from the paper
|
||||
:refractoryDuration => 2.0, # neuron refractory period in tick
|
||||
:delta => 1.0,
|
||||
:tau_m => 5.0, # membrane time constant in millisecond. It should equals to time use for 1 sequence
|
||||
)
|
||||
|
||||
alif_neuron_params = Dict(
|
||||
:type => "alifNeuron",
|
||||
:v_th => 1.2, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
|
||||
:z_t => false, # neuron firing status at time = t
|
||||
:gammaPd => 0.3, # discount factor. The value is from the paper
|
||||
:refractoryDuration => 2.0, # neuron refractory period in millisecond
|
||||
:delta => 1.0,
|
||||
:tau_m => 5.0, # membrane time constant in millisecond. It should equals to time use for 1 sequence
|
||||
|
||||
# adaptation time constant in millisecond. It should equals to total time SNN takes to
|
||||
# perform a task i.e. equals to episode length
|
||||
:tau_a => 10.0,
|
||||
:beta => 0.15, # constant.
|
||||
:a => 0.0,
|
||||
)
|
||||
|
||||
linear_neuron_params = Dict(
|
||||
:type => "linearNeuron",
|
||||
:k => 0.9, # output leakink coefficient
|
||||
:tau_out => 5.0, # output time constant in millisecond. It should equals to time use for 1 sequence
|
||||
:out => 0.0, # neuron's output value store here
|
||||
)
|
||||
|
||||
I_kfnparams = Dict(
|
||||
:knowledgeFnName => "I",
|
||||
:lif_neuron_number => 200,
|
||||
:alif_neuron_number => 100, # from Allen Institute, ALIF is 40% of LIF
|
||||
:linear_neuron_number => 5, # output neuron, this is also the output length
|
||||
:Bn => "random", # error projection coefficient from kfn output's error to each neurons's error
|
||||
:learning_rate => 0.01,
|
||||
:neuron_connection_pattern => "100%", # number of each neuron subscribe to other neuron in knowledgeFn.neuronsArray
|
||||
:output_neuron_connection_pattern => "100%", # "60%" of kfn.neuronsArray or number
|
||||
:maximum_input_data_length => 5, # in case of GloVe word encoding, it is 300
|
||||
:neuron_w_in_generation_pattern => "random", # number or "random"
|
||||
:neuron_w_rec_generation_pattern => "random",
|
||||
:neuron_v_t_default => 0.5,
|
||||
:neuron_voltage_drop_percentage => "100%",
|
||||
:neuronFiringRateTarget => 50.0,
|
||||
:neuron_learning_rate => 0.01,
|
||||
:neuron_c_reg => 0.0001,
|
||||
:neuron_c_reg_v => 0.0001,
|
||||
:neuron_optimiser => "ADAM",
|
||||
:meta_params => Dict(:is_first_cycle => true,
|
||||
:launch_time => 0.0,))
|
||||
|
||||
kfn1 = knowledgeFn(kfnParams, lif_neuron_params, alif_neuron_params, linear_neuron_params)
|
||||
"""
|
||||
function kfn_1(kfnParams::Dict)
    # Build a "kfn_1" knowledgeFn from `kfnParams`: create the input, compute and
    # output neurons, split compute neurons into excitatory/inhibitory, and fold
    # each presynaptic neuron's type into the sign of its subscribers' recurrent
    # weights. Returns the populated knowledgeFn object.
    kfn = kfn_1()
    kfn.kfnParams = kfnParams
    kfn.knowledgeFnName = kfn.kfnParams[:knowledgeFnName]

    # `error` already throws an ErrorException; the previous `throw(error(...))`
    # wrapper was redundant.
    if kfn.kfnParams[:computeNeuronNumber] < kfn.kfnParams[:totalInputPort]
        error("number of compute neuron must be greater than input neuron")
    end

    # Neuron IDs are simply the neuron's position in kfn.neuronsArray, so
    # insertion order matters below.

    # Input ports must be added before any other neuron type so they occupy the
    # lowest IDs.
    for (port_key, _) in kfn.kfnParams[:inputPort]
        port = kfn.kfnParams[:inputPort][port_key]
        for _ = 1:port[:numbers]
            n_id = length(kfn.neuronsArray) + 1
            push!(kfn.neuronsArray, init_neuron(n_id, port[:params], kfn.kfnParams))
        end
    end

    # Compute neurons, one group per configured compute-neuron type.
    for (ctype_key, _) in kfn.kfnParams[:computeNeuron]
        ctype = kfn.kfnParams[:computeNeuron][ctype_key]
        for _ = 1:ctype[:numbers]
            n_id = length(kfn.neuronsArray) + 1
            push!(kfn.neuronsArray, init_neuron(n_id, ctype[:params], kfn.kfnParams))
        end
    end

    # Output neurons live in their own array and are numbered from 1.
    for i = 1:kfn.kfnParams[:outputPort][:numbers]
        push!(kfn.outputNeuronsArray,
              init_neuron(i, kfn.kfnParams[:outputPort][:params], kfn.kfnParams))
    end

    # Propagate the knowledgeFn-wide firing-rate target to every compute neuron.
    for n in kfn.neuronsArray
        if typeof(n) <: computeNeuron
            n.firingRateTarget = kfn.kfnParams[:neuronFiringRateTarget]
        end
    end

    # Excitatory : inhibitory split of the compute neurons (e.g. 60:40 %).
    ex_number = Int(floor((kfn.excitatoryPercent / 100.0) *
                          kfn.kfnParams[:computeNeuronNumber]))
    in_number = kfn.kfnParams[:computeNeuronNumber] - ex_number
    ex_in = shuffle!([fill(1, ex_number); fill(-1, in_number)])

    # Pop a shuffled +1/-1 type for each neuron in array order. Once the pool is
    # exhausted, pop! on the empty vector throws and the bare catch leaves the
    # remaining neurons at their default ExInType.
    for n in kfn.neuronsArray
        try n.ExInType = pop!(ex_in) catch end
    end

    # Fold each presynaptic neuron's ExInType into the sign of the subscribing
    # neuron's recurrent weight, and record the presynaptic IDs in
    # kfn.nExcitatory / kfn.nInhabitory. Input neurons have no subscriptionList,
    # which is what the bare catch silences.
    # NOTE(review): the bare catch also hides any other error in this loop.
    for n in kfn.neuronsArray
        try
            for (i, sub_id) in enumerate(n.subscriptionList)
                sub_type = kfn.neuronsArray[sub_id].ExInType
                n.wRec[i] *= sub_type
                if sub_type < 0
                    push!(kfn.nInhabitory, sub_id)
                else
                    push!(kfn.nExcitatory, sub_id)
                end
            end
        catch
        end
    end

    # Same sign adjustment for the output neurons' incoming weights.
    for n in kfn.outputNeuronsArray
        try
            for (i, sub_id) in enumerate(n.subscriptionList)
                n.wRec[i] *= kfn.neuronsArray[sub_id].ExInType
            end
        catch
        end
    end

    # Snapshot every neuron's final excitatory/inhibitory type.
    for n in kfn.neuronsArray
        push!(kfn.nExInType, n.ExInType)
    end

    return kfn
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" passthroughNeuron struct
|
||||
"""
|
||||
Base.@kwdef mutable struct passthroughNeuron <: inputNeuron
|
||||
id::Int64 = 0 # ID of this neuron which is it position in knowledgeFn array
|
||||
type::String = "passthroughNeuron"
|
||||
knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
|
||||
z_t::Bool = false
|
||||
z_t1::Bool = false
|
||||
timeStep::Int64 = 0 # current time
|
||||
ExInType::Int64 = 1 # 1 excitatory, -1 inhabitory. input neuron is always excitatory
|
||||
end
|
||||
|
||||
""" Outer constructor: build a default passthroughNeuron, then overwrite every
    field whose name appears as a key in `params`. Unknown keys are ignored.
"""
function passthroughNeuron(params::Dict)
    n = passthroughNeuron()
    for field in fieldnames(typeof(n))
        if haskey(params, field)
            if field == :optimiser
                # optimiser is given as a dotted path string (e.g. "Flux.Optimise.ADAM");
                # only the last component names the optimiser to load.
                opt_type = string(split(params[field], ".")[end])
                setproperty!(n, field, load_optimiser(opt_type))
            else
                # assign the param to the matching struct field
                setproperty!(n, field, params[field])
            end
        end
    end
    return n
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" lifNeuron struct
|
||||
"""
|
||||
Base.@kwdef mutable struct lifNeuron <: computeNeuron
|
||||
id::Int64 = 0 # this neuron ID i.e. position of this neuron in knowledgeFn
|
||||
type::String = "lifNeuron"
|
||||
ExInType::Int64 = 1 # 1 excitatory, -1 inhabitory
|
||||
knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
|
||||
subscriptionList::Array{Int64} = Int64[] # list of other neuron that this neuron synapse subscribed to
|
||||
timeStep::Int64 = 0 # current time
|
||||
wRec::Array{Float64} = Float64[] # synaptic weight (for receiving signal from other neuron)
|
||||
v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of previous timestep
|
||||
v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at current timestep
|
||||
v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
|
||||
vRest::Float64 = 0.0 # resting potential after neuron fired
|
||||
z_t::Bool = false # zᵗ, neuron postsynaptic firing of previous timestep
|
||||
# zᵗ⁺¹, neuron firing status at time = t+1. I need this because the way I calculate all
|
||||
# neurons forward function at each timestep-by-timestep is to do every neuron
|
||||
# forward calculation. Each neuron requires access to other neuron's firing status
|
||||
# during v_t1 calculation hence I need a variable to hold z_t1 so that I'm not replacing z_t
|
||||
z_t1::Bool = false # neuron postsynaptic firing at current timestep (after neuron's calculation)
|
||||
z_i_t::Array{Bool} = Bool[] # neuron presynaptic firing at current timestep (which is other neuron postsynaptic firing of previous timestep)
|
||||
z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
|
||||
synapticStrength::Array{Float64} = Float64[]
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))
|
||||
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spike
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
eRec::Array{Float64} = Float64[] # eligibility trace for neuron spike
|
||||
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
|
||||
refractoryDuration::Int64 = 3 # neuron's refratory period in millisecond
|
||||
refractoryCounter::Int64 = 0
|
||||
tau_m::Float64 = 0.0 # τ_m, membrane time constant in millisecond
|
||||
eta::Float64 = 0.0001 # η, learning rate
|
||||
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
|
||||
recSignal::Float64 = 0.0 # incoming recurrent signal
|
||||
alpha_v_t::Float64 = 0.0 # alpha * v_t
|
||||
error::Float64 = 0.0 # local neuron error
|
||||
# optimiser::Union{Any,Nothing} = load_optimiser("AdaBelief") # Flux optimizer
|
||||
|
||||
firingCounter::Int64 = 0 # store how many times neuron fires
|
||||
firingRateTarget::Float64 = 20.0 # neuron's target firing rate in Hz
|
||||
firingDiff::Float64 = 0.0 # e-prop supplement paper equation 5
|
||||
firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
|
||||
firingRate::Float64 = 0.0 # running average of firing rate in Hz
|
||||
|
||||
""" "inference" = no learning params will be collected.
|
||||
"learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
|
||||
correct answer is available then merge Δw_rec_change into wRecChange then
|
||||
reset epsilon_j.
|
||||
"reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
|
||||
learningStage::String = "inference"
|
||||
end
|
||||
|
||||
""" lif neuron outer constructor
|
||||
|
||||
# Example
|
||||
|
||||
lif_neuron_params = Dict(
|
||||
:type => "lifNeuron",
|
||||
:v_th => 1.2, # neuron firing threshold (this value is treated as maximum bound if I use auto generate)
|
||||
:z_t => false, # neuron firing status at time = t
|
||||
:gammaPd => 0.3, # discount factor. The value is from the paper
|
||||
:refractoryDuration => 2.0, # neuron refractory period in tick
|
||||
:delta => 1.0,
|
||||
:tau_m => 5.0, # membrane time constant in millisecond. It should equals to time use for 1 sequence
|
||||
)
|
||||
|
||||
neuron1 = lifNeuron(lif_neuron_params)
|
||||
"""
|
||||
function lifNeuron(params::Dict)
|
||||
n = lifNeuron()
|
||||
field_names = fieldnames(typeof(n))
|
||||
for i in field_names
|
||||
if i in keys(params)
|
||||
if i == :optimiser
|
||||
opt_type = string(split(params[i], ".")[end])
|
||||
n.:($i) = load_optimiser(opt_type)
|
||||
else
|
||||
n.:($i) = params[i] # assign params to n struct fields
|
||||
end
|
||||
end
|
||||
end
|
||||
return n
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
""" alifNeuron struct
|
||||
"""
|
||||
Base.@kwdef mutable struct alifNeuron <: computeNeuron
|
||||
id::Int64 = 0 # this neuron ID i.e. position of this neuron in knowledgeFn
|
||||
type::String = "alifNeuron"
|
||||
ExInType::Int64 = -1 # 1 excitatory, -1 inhabitory
|
||||
knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
|
||||
subscriptionList::Array{Int64} = Int64[] # list of other neuron that this neuron synapse subscribed to
|
||||
timeStep::Int64 = 0 # current time
|
||||
wRec::Array{Float64} = Float64[] # synaptic weight (for receiving signal from other neuron)
|
||||
v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of previous timestep
|
||||
v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at current timestep
|
||||
v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
|
||||
vRest::Float64 = 0.0 # resting potential after neuron fired
|
||||
z_t::Bool = false # zᵗ, neuron postsynaptic firing of previous timestep
|
||||
# zᵗ⁺¹, neuron firing status at time = t+1. I need this because the way I calculate all
|
||||
# neurons forward function at each timestep-by-timestep is to do every neuron
|
||||
# forward calculation. Each neuron requires access to other neuron's firing status
|
||||
# during v_t1 calculation hence I need a variable to hold z_t1 so that I'm not replacing z_t
|
||||
z_t1::Bool = false # neuron postsynaptic firing at current timestep (after neuron's calculation)
|
||||
z_i_t::Array{Bool} = Bool[] # neuron presynaptic firing at current timestep (which is other neuron postsynaptic firing of previous timestep)
|
||||
z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
|
||||
synapticStrength::Array{Float64} = Float64[]
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>0), upperlimit=(5=>5))
|
||||
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec(v), eligibility vector for neuron i spike
|
||||
epsilonRecA::Array{Float64} = Float64[] # ϵ_rec(a)
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
eRec_v::Array{Float64} = Float64[] # a component of neuron's eligibility trace resulted from v_t
|
||||
eRec_a::Array{Float64} = Float64[] # a component of neuron's eligibility trace resulted from av_th
|
||||
eRec::Array{Float64} = Float64[] # neuron's eligibility trace
|
||||
eta::Float64 = 0.0001 # eta, learning rate
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
refractoryDuration::Int64 = 3 # neuron's refractory period in millisecond
|
||||
refractoryCounter::Int64 = 0
|
||||
tau_m::Float64 = 0.0 # τ_m, membrane time constant in millisecond
|
||||
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
|
||||
recSignal::Float64 = 0.0 # incoming recurrent signal
|
||||
alpha_v_t::Float64 = 0.0 # alpha * v_t
|
||||
error::Float64 = 0.0 # local neuron error
|
||||
# optimiser::Union{Any,Nothing} = load_optimiser("AdaBelief") # Flux optimizer
|
||||
|
||||
firingCounter::Int64 = 0 # store how many times neuron fires
|
||||
firingRateTarget::Float64 = 20.0 # neuron's target firing rate in Hz
|
||||
firingDiff::Float64 = 0.0 # e-prop supplement paper equation 5
|
||||
firingRateError::Float64 = 0.0 # local neuron error w.r.t. firing regularization
|
||||
firingRate::Float64 = 0.0 # running average of firing rate, Hz
|
||||
|
||||
tau_a::Float64 = 0.0 # τ_a, adaption time constant in millisecond
|
||||
beta::Float64 = 0.15 # β, constant, value from paper
|
||||
rho::Float64 = 0.0 # ρ, threshold adaptation decay factor
|
||||
a::Float64 = 0.0 # threshold adaptation
|
||||
av_th::Float64 = 0.0 # adjusted neuron firing threshold
|
||||
|
||||
""" "inference" = no learning params will be collected.
|
||||
"learning" = neuron will accumulate epsilon_j, compute Δw_rec_change each time
|
||||
correct answer is available then merge Δw_rec_change into wRecChange then
|
||||
reset epsilon_j.
|
||||
"reflect" = neuron will merge wRecChange into wRec then reset wRecChange. """
|
||||
learningStage::String = "inference"
|
||||
end
|
||||
""" alif neuron outer constructor
|
||||
|
||||
# Example
|
||||
|
||||
alif_neuron_params = Dict(
|
||||
:type => "alifNeuron",
|
||||
:v_th => 1.2, # neuron firing threshold (this value is treated as maximum bound if I
|
||||
use auto generate)
|
||||
:z_t => false, # neuron firing status at time = t
|
||||
:gammaPd => 0.3, # discount factor. The value is from the paper
|
||||
:refractoryDuration => 2.0, # neuron refractory period in millisecond
|
||||
:delta => 1.0,
|
||||
:tau_m => 5.0, # membrane time constant in millisecond. It should equals to time use
|
||||
for 1 sequence
|
||||
|
||||
# adaptation time constant in millisecond. It should equals to total time SNN takes to
|
||||
# perform a task i.e. equals to episode length
|
||||
:tau_a => 10.0,
|
||||
:beta => 0.15, # constant.
|
||||
:a => 0.0,
|
||||
)
|
||||
|
||||
neuron1 = alifNeuron(alif_neuron_params)
|
||||
"""
|
||||
function alifNeuron(params::Dict)
|
||||
n = alifNeuron()
|
||||
field_names = fieldnames(typeof(n))
|
||||
for i in field_names
|
||||
if i in keys(params)
|
||||
if i == :optimiser
|
||||
opt_type = string(split(params[i], ".")[end])
|
||||
n.:($i) = load_optimiser(opt_type)
|
||||
else
|
||||
n.:($i) = params[i] # assign params to n struct fields
|
||||
end
|
||||
end
|
||||
end
|
||||
return n
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
""" linearNeuron struct
|
||||
"""
|
||||
Base.@kwdef mutable struct linearNeuron <: outputNeuron
|
||||
id::Float64 = 0.0 # ID of this neuron which is it position in knowledgeFn array
|
||||
type::String = "linearNeuron"
|
||||
knowledgeFnName::String = "not defined" # knowledgeFn that this neuron belongs to
|
||||
subscriptionList::Array{Int64} = Int64[] # list of other neuron that this neuron synapse subscribed to
|
||||
timeStep::Int64 = 0 # current time
|
||||
wRec::Array{Float64} = Float64[] # synaptic weight (for receiving signal from other neuron)
|
||||
v_t::Float64 = 0.0 # vᵗ, postsynaptic neuron membrane potential of previous timestep
|
||||
v_t1::Float64 = rand() # vᵗ⁺¹, postsynaptic neuron membrane potential at current timestep
|
||||
v_th::Float64 = 1.0 # vᵗʰ, neuron firing threshold
|
||||
vRest::Float64 = 0.0 # resting potential after neuron fired
|
||||
vError::Float64 = 0.0 # used to compute model error
|
||||
z_t::Bool = false # zᵗ, neuron postsynaptic firing of previous timestep
|
||||
# zᵗ⁺¹, neuron firing status at time = t+1. I need this because the way I calculate all
|
||||
# neurons forward function at each timestep-by-timestep is to do every neuron
|
||||
# forward calculation. Each neuron requires access to other neuron's firing status
|
||||
# during v_t1 calculation hence I need a variable to hold z_t1 so that I'm not replacing z_t
|
||||
z_t1::Bool = false # neuron postsynaptic firing at current timestep (after neuron's calculation)
|
||||
|
||||
# neuron presynaptic firing at current timestep (which is other neuron postsynaptic firing of
|
||||
# previous timestep)
|
||||
z_i_t::Array{Bool} = Bool[]
|
||||
z_i_t_commulative::Array{Int64} = Int64[] # used to compute connection strength
|
||||
synapticStrength::Array{Float64} = Float64[]
|
||||
synapticStrengthLimit::NamedTuple = (lowerlimit=(-5=>-5), upperlimit=(5=>5))
|
||||
|
||||
gammaPd::Float64 = 0.3 # γ_pd, discount factor, value from paper
|
||||
alpha::Float64 = 0.0 # α, neuron membrane potential decay factor
|
||||
phi::Float64 = 0.0 # ϕ, psuedo derivative
|
||||
epsilonRec::Array{Float64} = Float64[] # ϵ_rec, eligibility vector for neuron spike
|
||||
decayedEpsilonRec::Array{Float64} = Float64[] # α * epsilonRec
|
||||
eRec::Array{Float64} = Float64[] # eligibility trace for neuron spike
|
||||
delta::Float64 = 1.0 # δ, discreate timestep size in millisecond
|
||||
refractoryDuration::Int64 = 3 # neuron's refratory period in millisecond
|
||||
refractoryCounter::Int64 = 0
|
||||
tau_out::Float64 = 0.0 # τ_out, membrane time constant in millisecond
|
||||
eta::Float64 = 0.0001 # η, learning rate
|
||||
wRecChange::Array{Float64} = Float64[] # Δw_rec, cumulated wRec change
|
||||
recSignal::Float64 = 0.0 # incoming recurrent signal
|
||||
alpha_v_t::Float64 = 0.0 # alpha * v_t
|
||||
|
||||
firingCounter::Int64 = 0 # store how many times neuron fires
|
||||
end
|
||||
|
||||
""" linear neuron outer constructor
|
||||
|
||||
# Example
|
||||
|
||||
linear_neuron_params = Dict(
|
||||
:type => "linearNeuron",
|
||||
:k => 0.9, # output leakink coefficient
|
||||
:tau_out => 5.0, # output time constant in millisecond. It should equals to time use for 1 sequence
|
||||
:out => 0.0, # neuron's output value store here
|
||||
)
|
||||
|
||||
neuron1 = linearNeuron(linear_neuron_params)
|
||||
"""
|
||||
function linearNeuron(params::Dict)
|
||||
n = linearNeuron()
|
||||
field_names = fieldnames(typeof(n))
|
||||
for i in field_names
|
||||
if i in keys(params)
|
||||
if i == :optimiser
|
||||
opt_type = string(split(params[i], ".")[end])
|
||||
n.:($i) = load_optimiser(opt_type)
|
||||
else
|
||||
n.:($i) = params[i] # assign params to n struct fields
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return n
|
||||
end
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
# function load_optimiser(optimiser_name::String; params::Union{Dict,Nothing} = nothing)
|
||||
# if optimiser_name == "AdaBelief"
|
||||
# params = (0.01, (0.9, 0.8))
|
||||
# return Flux.Optimise.AdaBelief(params...)
|
||||
# elseif optimiser_name == "AdaBelief2"
|
||||
# # output neuron requires slower change pace so η is lower than compute neuron at 0.007
|
||||
# # because if w_out change too fast, compute neuron will not able to
|
||||
# # grapse output neuron moving direction i.e. both compute neuron's direction and
|
||||
# # output neuron direction are out of sync.
|
||||
# params = (0.007, (0.9, 0.8))
|
||||
# return Flux.Optimise.AdaBelief(params...)
|
||||
# else
|
||||
# error("optimiser is not defined yet in load_optimiser()")
|
||||
# end
|
||||
# end
|
||||
|
||||
""" Initialise a passthroughNeuron in place: assign its ID and owning
    knowledgeFn name. Input neurons need no synaptic wiring. """
function init_neuron!(id::Int64, n::passthroughNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]
end
|
||||
|
||||
# function init_neuron!(id::Int64, n::lifNeuron, kfnParams::Dict)
|
||||
# n.id = id
|
||||
# n.knowledgeFnName = kfnParams[:knowledgeFnName]
|
||||
# subscription_options = shuffle!([1:(kfnParams[:input_neuron_number]+kfnParams[:computeNeuronNumber])...])
|
||||
# if typeof(kfnParams[:synapticConnectionPercent]) == String
|
||||
# percent = parse(Int, kfnParams[:synapticConnectionPercent][1:end-1]) / 100
|
||||
# synapticConnectionPercent = floor(length(subscription_options) * percent)
|
||||
# n.subscriptionList = [pop!(subscription_options) for i = 1:synapticConnectionPercent]
|
||||
# end
|
||||
# filter!(x -> x != n.id, n.subscriptionList)
|
||||
# n.epsilonRec = zeros(length(n.subscriptionList))
|
||||
# n.wRec = Random.rand(length(n.subscriptionList))
|
||||
# n.wRecChange = zeros(length(n.subscriptionList))
|
||||
# n.reg_voltage_b = zeros(length(n.subscriptionList))
|
||||
# n.alpha = calculate_α(n)
|
||||
# end
|
||||
|
||||
""" Initialise a lifNeuron in place: assign its ID, pick a random subscription
    list covering `synapticConnectionPercent` % of all neurons (never itself),
    and allocate the per-synapse state vectors. """
function init_neuron!(id::Int64, n::lifNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]
    # collect avoids splatting a potentially large range into a call
    subscription_options = shuffle!(collect(1:kfnParams[:totalNeurons]))
    subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
                                     kfnParams[:totalNeurons]))
    n.subscriptionList = [pop!(subscription_options) for _ = 1:subscription_numbers]

    # prevent subscription to itself by removing this neuron's id
    filter!(x -> x != n.id, n.subscriptionList)
    n_sub = length(n.subscriptionList)
    n.synapticStrength = rand(-5:0.01:-4, n_sub)

    n.epsilonRec = zeros(n_sub)
    n.wRec = rand(-0.2:0.01:0.2, n_sub)
    n.wRecChange = zeros(n_sub)
    n.alpha = calculate_α(n)
    n.z_i_t_commulative = zeros(n_sub)
end
|
||||
|
||||
""" Initialise an alifNeuron in place: same wiring as the lifNeuron method, plus
    the adaptation decay factor ρ and the adaptation eligibility vector. """
function init_neuron!(id::Int64, n::alifNeuron, n_params::Dict,
                      kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]
    # collect avoids splatting a potentially large range into a call
    subscription_options = shuffle!(collect(1:kfnParams[:totalNeurons]))
    subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
                                     kfnParams[:totalNeurons]))
    n.subscriptionList = [pop!(subscription_options) for _ = 1:subscription_numbers]

    # prevent subscription to itself by removing this neuron's id
    filter!(x -> x != n.id, n.subscriptionList)
    n_sub = length(n.subscriptionList)
    n.synapticStrength = rand(-5:0.01:-4, n_sub)

    n.epsilonRec = zeros(n_sub)
    n.wRec = rand(-0.2:0.01:0.2, n_sub)
    n.wRecChange = zeros(n_sub)

    # the more time has passed since the neuron was last activated, the more
    # its membrane potential has decayed
    n.alpha = calculate_α(n)
    n.rho = calculate_ρ(n)
    n.epsilonRecA = zeros(n_sub)
    n.z_i_t_commulative = zeros(n_sub)
end
|
||||
|
||||
|
||||
""" Initialise a linearNeuron in place. Output neurons only subscribe to compute
    neurons, i.e. IDs above the input-port range. """
function init_neuron!(id::Int64, n::linearNeuron, n_params::Dict, kfnParams::Dict)
    n.id = id
    n.knowledgeFnName = kfnParams[:knowledgeFnName]

    # collect avoids splatting a potentially large range into a call
    subscription_options =
        shuffle!(collect(kfnParams[:totalInputPort]+1 : kfnParams[:totalNeurons]))
    # NOTE(review): precedence here gives p/100*total - inputs, not
    # p/100*(total - inputs); the two agree only at 100%. Behavior preserved —
    # confirm which was intended.
    subscription_numbers = Int(floor((n_params[:synapticConnectionPercent] / 100.0) *
                                     kfnParams[:totalNeurons] - kfnParams[:totalInputPort]))
    n.subscriptionList = [pop!(subscription_options) for _ = 1:subscription_numbers]
    n_sub = length(n.subscriptionList)
    n.synapticStrength = rand(-5:0.01:-4, n_sub)

    n.epsilonRec = zeros(n_sub)
    n.wRec = rand(-0.2:0.01:0.2, n_sub)
    n.wRecChange = zeros(n_sub)
    # NOTE(review): the output decay factor is stored in `alpha` (there is no
    # dedicated `k` field on linearNeuron).
    n.alpha = calculate_k(n)
    n.z_i_t_commulative = zeros(n_sub)
end
|
||||
|
||||
""" Make a neuron intended for use with knowledgeFn
|
||||
"""
|
||||
function init_neuron(id::Int64, n_params::Dict, kfnParams::Dict)
|
||||
n = instantiate_custom_types(n_params)
|
||||
init_neuron!(id, n, n_params, kfnParams)
|
||||
|
||||
return n
|
||||
end
|
||||
|
||||
""" This function instantiate Ironpen type.
|
||||
|
||||
# Example
|
||||
|
||||
new_model = instantiate_custom_types("model")
|
||||
"""
|
||||
function instantiate_custom_types(params::Union{Dict,Nothing} = nothing)
|
||||
type = string(split(params[:type], ".")[end])
|
||||
|
||||
if type == "model"
|
||||
return model()
|
||||
elseif type == "knowledgeFn"
|
||||
return knowledgeFn()
|
||||
elseif type == "passthroughNeuron"
|
||||
return passthroughNeuron(params)
|
||||
elseif type == "lifNeuron"
|
||||
return lifNeuron(params)
|
||||
elseif type == "alifNeuron"
|
||||
return alifNeuron(params)
|
||||
elseif type == "linearNeuron"
|
||||
return linearNeuron(params)
|
||||
else
|
||||
return nothing
|
||||
end
|
||||
end
|
||||
|
||||
""" Add a new neuron into a knowledgeFn
|
||||
|
||||
# Example
|
||||
add_neuron!(kfn.kfnParams[:lif_neuron_params], kfn)
|
||||
"""
|
||||
# function add_neuron!(neuron_Dict::Dict, kfn::knowledgeFn)
|
||||
# id = length(kfn.neuronsArray) + 1
|
||||
# neuron = init_neuron(id, neuron_Dict, kfn.kfnParams,
|
||||
# totalNeurons = (length(kfn.neuronsArray) + 1))
|
||||
# push!(kfn.neuronsArray, neuron)
|
||||
|
||||
# # Randomly select an output neuron to add a new neuron to
|
||||
# add_n_output_n!(Random.rand(kfn.outputNeuronsArray), id)
|
||||
# end
|
||||
|
||||
# Per-timestep exponential decay factors, exp(-δ/τ) for the relevant time
# constant. The two identical calculate_α methods are merged into one Union
# signature (same dispatch behavior, less duplication).
calculate_α(neuron::Union{lifNeuron,alifNeuron}) = exp(-neuron.delta / neuron.tau_m)  # membrane decay
calculate_ρ(neuron::alifNeuron) = exp(-neuron.delta / neuron.tau_a)                   # threshold-adaptation decay
calculate_k(neuron::linearNeuron) = exp(-neuron.delta / neuron.tau_out)               # output decay
|
||||
|
||||
#------------------------------------------------------------------------------------------------100
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
end # module end
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user