diff --git a/axon/enumgen.go b/axon/enumgen.go
index abe9eb8dc..6df6e6543 100644
--- a/axon/enumgen.go
+++ b/axon/enumgen.go
@@ -135,6 +135,49 @@ func (i *LayerTypes) UnmarshalText(text []byte) error {
 	return enums.UnmarshalText(i, text, "LayerTypes")
 }
 
+var _SynCaFunsValues = []SynCaFuns{0, 1, 2}
+
+// SynCaFunsN is the highest valid value for type SynCaFuns, plus one.
+const SynCaFunsN SynCaFuns = 3
+
+var _SynCaFunsValueMap = map[string]SynCaFuns{`StdSynCa`: 0, `LinearSynCa`: 1, `NeurSynCa`: 2}
+
+var _SynCaFunsDescMap = map[SynCaFuns]string{0: `StdSynCa uses standard synaptic calcium integration method`, 1: `LinearSynCa uses linear regression generated calcium integration (much faster)`, 2: `NeurSynCa uses simple product of separately-integrated neuron values (much faster)`}
+
+var _SynCaFunsMap = map[SynCaFuns]string{0: `StdSynCa`, 1: `LinearSynCa`, 2: `NeurSynCa`}
+
+// String returns the string representation of this SynCaFuns value.
+func (i SynCaFuns) String() string { return enums.String(i, _SynCaFunsMap) }
+
+// SetString sets the SynCaFuns value from its string representation,
+// and returns an error if the string is invalid.
+func (i *SynCaFuns) SetString(s string) error {
+	return enums.SetString(i, s, _SynCaFunsValueMap, "SynCaFuns")
+}
+
+// Int64 returns the SynCaFuns value as an int64.
+func (i SynCaFuns) Int64() int64 { return int64(i) }
+
+// SetInt64 sets the SynCaFuns value from an int64.
+func (i *SynCaFuns) SetInt64(in int64) { *i = SynCaFuns(in) }
+
+// Desc returns the description of the SynCaFuns value.
+func (i SynCaFuns) Desc() string { return enums.Desc(i, _SynCaFunsDescMap) }
+
+// SynCaFunsValues returns all possible values for the type SynCaFuns.
+func SynCaFunsValues() []SynCaFuns { return _SynCaFunsValues }
+
+// Values returns all possible values for the type SynCaFuns.
+func (i SynCaFuns) Values() []enums.Enum { return enums.Values(_SynCaFunsValues) }
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i SynCaFuns) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *SynCaFuns) UnmarshalText(text []byte) error {
+	return enums.UnmarshalText(i, text, "SynCaFuns")
+}
+
 var _DAModTypesValues = []DAModTypes{0, 1, 2, 3}
 
 // DAModTypesN is the highest valid value for type DAModTypes, plus one.
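
Note on the generated block above: it is the standard enums support for the new SynCaFuns option, and its SetString / String methods are what let the option be selected by name in the params files later in this diff (e.g. "Path.Learn.Trace.SynCa": "LinearSynCa"). A minimal usage sketch, not part of the patch, assuming the usual github.com/emer/axon/v2/axon import path:

```go
package main

import (
	"fmt"

	"github.com/emer/axon/v2/axon"
)

func main() {
	// parse the option from its string name, as the params machinery would
	var f axon.SynCaFuns
	if err := f.SetString("LinearSynCa"); err != nil {
		panic(err)
	}
	fmt.Println(f)                       // LinearSynCa (via the generated String method)
	fmt.Println(int64(f))                // 1 (underlying value)
	fmt.Println(int64(axon.SynCaFunsN))  // 3 = highest valid value, plus one
	fmt.Println(axon.LinearSynCa.Desc()) // doc string, from _SynCaFunsDescMap
}
```
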
diff --git a/axon/learn.go b/axon/learn.go
index 1946bd534..ec96a0d56 100644
--- a/axon/learn.go
+++ b/axon/learn.go
@@ -682,9 +682,26 @@ func (ls *LRateParams) Init() {
 	ls.UpdateEff()
 }
 
+// SynCaFuns are different ways of computing synaptic calcium (experimental)
+type SynCaFuns int32 //enums:enum
+
+const (
+	// StdSynCa uses standard synaptic calcium integration method
+	StdSynCa SynCaFuns = iota
+
+	// LinearSynCa uses linear regression generated calcium integration (much faster)
+	LinearSynCa
+
+	// NeurSynCa uses simple product of separately-integrated neuron values (much faster)
+	NeurSynCa
+)
+
 // TraceParams manages parameters associated with temporal trace learning
 type TraceParams struct {
 
+	// how to compute the synaptic calcium (experimental)
+	SynCa SynCaFuns
+
 	// time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace
 	Tau float32 `default:"1,2,4"`
 
@@ -696,9 +713,12 @@ type TraceParams struct {
 
 	// rate = 1 / tau
 	Dt float32 `view:"-" json:"-" xml:"-" edit:"-"`
+
+	pad, pad1, pad2 float32
 }
 
 func (tp *TraceParams) Defaults() {
+	tp.SynCa = LinearSynCa
 	tp.Tau = 1
 	tp.SubMean = 0
 	tp.LearnThr = 0
diff --git a/axon/pathparams.go b/axon/pathparams.go
index c1d44483b..b65e9707c 100644
--- a/axon/pathparams.go
+++ b/axon/pathparams.go
@@ -277,8 +277,7 @@ func (pj *PathParams) GatherSpikes(ctx *Context, ly *LayerParams, ni, di uint32,
 // DoSynCa returns false if should not do synaptic-level calcium updating.
 // Done by default in Cortex, not for some other special pathway types.
 func (pj *PathParams) DoSynCa() bool {
-	if pj.PathType == RWPath || pj.PathType == TDPredPath || pj.PathType == VSMatrixPath ||
-		pj.PathType == DSMatrixPath || pj.PathType == VSPatchPath || pj.PathType == BLAPath {
+	if pj.Learn.Trace.SynCa != StdSynCa || pj.PathType == RWPath || pj.PathType == TDPredPath || pj.PathType == VSMatrixPath || pj.PathType == DSMatrixPath || pj.PathType == VSPatchPath || pj.PathType == BLAPath || pj.Learn.Hebb.On.IsTrue() {
 		return false
 	}
 	return true
@@ -338,28 +337,36 @@ func (pj *PathParams) DWtSyn(ctx *Context, syni, si, ri, di uint32, layPool, sub
 // Uses synaptically integrated spiking, computed at the Theta cycle interval.
 // This is the trace version for hidden units, and uses syn CaP - CaD for targets.
 func (pj *PathParams) DWtSynCortex(ctx *Context, syni, si, ri, di uint32, layPool, subPool *Pool, isTarget bool) {
-	// credit assignment part
-	caUpT := SynCaV(ctx, syni, di, CaUpT) // time of last update
-	syCaM := SynCaV(ctx, syni, di, CaM)   // fast time scale
-	syCaP := SynCaV(ctx, syni, di, CaP)   // slower but still fast time scale, drives Potentiation
-	syCaD := SynCaV(ctx, syni, di, CaD)   // slow time scale, drives Depression (one trial = 200 cycles)
-	pj.Learn.KinaseCa.CurCa(ctx.SynCaCtr, caUpT, &syCaM, &syCaP, &syCaD) // always update, getting current Ca (just optimization)
-
-	rb0 := NrnV(ctx, ri, di, SpkBin0)
-	sb0 := NrnV(ctx, si, di, SpkBin0)
-	rb1 := NrnV(ctx, ri, di, SpkBin1)
-	sb1 := NrnV(ctx, si, di, SpkBin1)
-	rb2 := NrnV(ctx, ri, di, SpkBin2)
-	sb2 := NrnV(ctx, si, di, SpkBin2)
-	rb3 := NrnV(ctx, ri, di, SpkBin3)
-	sb3 := NrnV(ctx, si, di, SpkBin3)
-
-	b0 := 0.1 * (rb0 * sb0)
-	b1 := 0.1 * (rb1 * sb1)
-	b2 := 0.1 * (rb2 * sb2)
-	b3 := 0.1 * (rb3 * sb3)
-
-	pj.Learn.KinaseCa.FinalCa(b0, b1, b2, b3, &syCaM, &syCaP, &syCaD)
+	var syCaM, syCaP, syCaD, caUpT float32
+	switch pj.Learn.Trace.SynCa {
+	case StdSynCa:
+		caUpT = SynCaV(ctx, syni, di, CaUpT) // time of last update
+		syCaM = SynCaV(ctx, syni, di, CaM)   // fast time scale
+		syCaP = SynCaV(ctx, syni, di, CaP)   // slower but still fast time scale, drives Potentiation
+		syCaD = SynCaV(ctx, syni, di, CaD)   // slow time scale, drives Depression (one trial = 200 cycles)
+		pj.Learn.KinaseCa.CurCa(ctx.SynCaCtr, caUpT, &syCaM, &syCaP, &syCaD) // always update, getting current Ca (just optimization)
+	case LinearSynCa:
+		rb0 := NrnV(ctx, ri, di, SpkBin0)
+		sb0 := NrnV(ctx, si, di, SpkBin0)
+		rb1 := NrnV(ctx, ri, di, SpkBin1)
+		sb1 := NrnV(ctx, si, di, SpkBin1)
+		rb2 := NrnV(ctx, ri, di, SpkBin2)
+		sb2 := NrnV(ctx, si, di, SpkBin2)
+		rb3 := NrnV(ctx, ri, di, SpkBin3)
+		sb3 := NrnV(ctx, si, di, SpkBin3)
+
+		b0 := 0.1 * (rb0 * sb0)
+		b1 := 0.1 * (rb1 * sb1)
+		b2 := 0.1 * (rb2 * sb2)
+		b3 := 0.1 * (rb3 * sb3)
+
+		pj.Learn.KinaseCa.FinalCa(b0, b1, b2, b3, &syCaM, &syCaP, &syCaD)
+	case NeurSynCa:
+		gain := float32(1.0)
+		syCaM = gain * NrnV(ctx, si, di, CaSpkM) * NrnV(ctx, ri, di, CaSpkM)
+		syCaP = gain * NrnV(ctx, si, di, CaSpkP) * NrnV(ctx, ri, di, CaSpkP)
+		syCaD = gain * NrnV(ctx, si, di, CaSpkD) * NrnV(ctx, ri, di, CaSpkD)
+	}
 	SetSynCaV(ctx, syni, di, CaM, syCaM)
 	SetSynCaV(ctx, syni, di, CaP, syCaP)
diff --git a/axon/rand.go b/axon/rand.go
index 53d0a6367..9c239b6e1 100644
--- a/axon/rand.go
+++ b/axon/rand.go
@@ -2,7 +2,6 @@ package axon
 
 import (
 	"cogentcore.org/core/vgpu/gosl/slrand"
-	"cogentcore.org/core/vgpu/gosl/sltype"
 )
 
 //gosl:hlsl axonrand
@@ -32,8 +31,7 @@ func GetRandomNumber(index uint32, counter slrand.Counter, funIndex RandFunIndex
 	var randCtr slrand.Counter
 	randCtr = counter
 	randCtr.Add(uint32(funIndex))
-	var ctr sltype.Uint2
-	ctr = randCtr.Uint2()
+	ctr := randCtr.Uint2()
 	return slrand.Float(&ctr, index)
 }
 
diff --git a/axon/shaders/Makefile b/axon/shaders/Makefile
index d1aca2dce..ef3063528 100644
--- a/axon/shaders/Makefile
+++ b/axon/shaders/Makefile
@@ -2,7 +2,7 @@
 # The go generate command does this automatically.
 
 all:
-	cd ../; gosl -exclude=Update,UpdateParams,Defaults,AllParams,ShouldShow cogentcore.org/core/math32/v2/fastexp.go cogentcore.org/core/etable/v2/minmax ../chans/chans.go ../chans ../kinase ../fsfffb/inhib.go ../fsfffb github.com/emer/emergent/v2/etime github.com/emer/emergent/v2/ringidx rand.go avgmax.go neuromod.go globals.go context.go neuron.go synapse.go pool.go layervals.go act.go act_prjn.go inhib.go learn.go layertypes.go layerparams.go deep_layers.go rl_layers.go pvlv_layers.go pcore_layers.go prjntypes.go prjnparams.go deep_prjns.go rl_prjns.go pvlv_prjns.go pcore_prjns.go hip_prjns.go gpu_hlsl
+	cd ../; gosl -exclude=Update,UpdateParams,Defaults,AllParams,ShouldShow cogentcore.org/core/math32/fastexp.go cogentcore.org/core/math32/minmax ../chans/chans.go ../chans ../kinase ../fsfffb/inhib.go ../fsfffb github.com/emer/emergent/v2/etime github.com/emer/emergent/v2/ringidx rand.go avgmax.go neuromod.go globals.go context.go neuron.go synapse.go pool.go layervals.go act.go act_prjn.go inhib.go learn.go layertypes.go layerparams.go deep_layers.go rl_layers.go pvlv_layers.go pcore_layers.go prjntypes.go prjnparams.go deep_prjns.go rl_prjns.go pvlv_prjns.go pcore_prjns.go hip_prjns.go gpu_hlsl
 
 # note: gosl automatically compiles the hlsl files using this command:
 %.spv : %.hlsl
diff --git a/axon/shaders/gpu_dwt.spv b/axon/shaders/gpu_dwt.spv
index abf2004ab..531f0e18f 100644
Binary files a/axon/shaders/gpu_dwt.spv and b/axon/shaders/gpu_dwt.spv differ
diff --git a/axon/shaders/gpu_dwtfmdi.spv b/axon/shaders/gpu_dwtfmdi.spv
index 614a45e87..7b97926c3 100644
Binary files a/axon/shaders/gpu_dwtfmdi.spv and b/axon/shaders/gpu_dwtfmdi.spv differ
diff --git a/axon/shaders/gpu_dwtsubmean.spv b/axon/shaders/gpu_dwtsubmean.spv
index b11cb9768..8d4cdab4d 100644
Binary files a/axon/shaders/gpu_dwtsubmean.spv and b/axon/shaders/gpu_dwtsubmean.spv differ
diff --git a/axon/shaders/gpu_gather.spv b/axon/shaders/gpu_gather.spv
index d10314d3b..ab7365e8c 100644
Binary files a/axon/shaders/gpu_gather.spv and b/axon/shaders/gpu_gather.spv differ
diff --git a/axon/shaders/gpu_newstate_pool.spv b/axon/shaders/gpu_newstate_pool.spv
index 4e1070cdd..75c7b476d 100644
Binary files a/axon/shaders/gpu_newstate_pool.spv and b/axon/shaders/gpu_newstate_pool.spv differ
diff --git a/axon/shaders/gpu_sendspike.spv b/axon/shaders/gpu_sendspike.spv
index 5d37ba2ea..358f395f7 100644
Binary files a/axon/shaders/gpu_sendspike.spv and b/axon/shaders/gpu_sendspike.spv differ
diff --git a/axon/shaders/gpu_synca.spv b/axon/shaders/gpu_synca.spv
index a1332daec..b7dca2a6c 100644
Binary files a/axon/shaders/gpu_synca.spv and b/axon/shaders/gpu_synca.spv differ
diff --git a/axon/shaders/gpu_wtfmdwt.spv b/axon/shaders/gpu_wtfmdwt.spv
index b5583932d..63337c4a1 100644
Binary files a/axon/shaders/gpu_wtfmdwt.spv and b/axon/shaders/gpu_wtfmdwt.spv differ
diff --git a/axon/typegen.go b/axon/typegen.go
index 5cd82a0b2..34a6af4b5 100644
--- a/axon/typegen.go
+++ b/axon/typegen.go
@@ -106,7 +106,9 @@ var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SWtParams"
 
 var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LRateParams", IDName: "l-rate-params", Doc: "LRateParams manages learning rate parameters", Directives: []types.Directive{{Tool: "gosl", Directive: "start", Args: []string{"learn"}}}, Fields: []types.Field{{Name: "Base", Doc: "base learning rate for this pathway -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2."}, {Name: "Sched", Doc: "scheduled learning rate multiplier, simulating reduction in plasticity over aging"}, {Name: "Mod", Doc: "dynamic learning rate modulation due to neuromodulatory or other such factors"}, {Name: "Eff", Doc: "effective actual learning rate multiplier used in computing DWt: Eff = eMod * Sched * Base"}}})
 
-var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.TraceParams", IDName: "trace-params", Doc: "TraceParams manages parameters associated with temporal trace learning", Fields: []types.Field{{Name: "Tau", Doc: "time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace"}, {Name: "SubMean", Doc: "amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning pathways, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special path types (e.g., Hebb) benefit from SubMean = 1 always"}, {Name: "LearnThr", Doc: "threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations"}, {Name: "Dt", Doc: "rate = 1 / tau"}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SynCaFuns", IDName: "syn-ca-funs", Doc: "SynCaFuns are different ways of computing synaptic calcium (experimental)"})
+
+var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.TraceParams", IDName: "trace-params", Doc: "TraceParams manages parameters associated with temporal trace learning", Fields: []types.Field{{Name: "SynCa", Doc: "how to compute the synaptic calcium (experimental)"}, {Name: "Tau", Doc: "time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace"}, {Name: "SubMean", Doc: "amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning pathways, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special path types (e.g., Hebb) benefit from SubMean = 1 always"}, {Name: "LearnThr", Doc: "threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations"}, {Name: "Dt", Doc: "rate = 1 / tau"}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
 
 var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LRateMod", IDName: "l-rate-mod", Doc: "LRateMod implements global learning rate modulation, based on a performance-based\nfactor, for example error. Increasing levels of the factor = higher learning rate.\nThis can be added to a Sim and called prior to DWt() to dynamically change lrate\nbased on overall network performance.", Fields: []types.Field{{Name: "On", Doc: "toggle use of this modulation factor"}, {Name: "Base", Doc: "baseline learning rate -- what you get for correct cases"}, {Name: "pad"}, {Name: "pad1"}, {Name: "Range", Doc: "defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1"}}})
 
diff --git a/examples/deep_fsa/params.go b/examples/deep_fsa/params.go
index 48f058daf..a23738472 100644
--- a/examples/deep_fsa/params.go
+++ b/examples/deep_fsa/params.go
@@ -83,8 +83,9 @@ var ParamSets = netparams.Sets{
 			}},
 		{Sel: "Path", Desc: "std",
 			Params: params.Params{
+				"Path.Learn.Trace.SynCa":   "LinearSynCa",
 				"Path.Learn.Trace.SubMean": "0",    // 0 > 1 -- even with CTCtxt = 0
-				"Path.Learn.LRate.Base":    "0.03", // .03 > others -- same as CtCtxt
+				"Path.Learn.LRate.Base":    "0.02", // .03 > others -- same as CtCtxt
 				"Path.SWts.Adapt.LRate":    "0.01", // 0.01 or 0.0001 music
 				"Path.SWts.Init.SPct":      "1.0",  // 1 works fine here -- .5 also ok
 				"Path.Com.PFail":           "0.0",
diff --git a/examples/kinaseq/kinaseq.go b/examples/kinaseq/kinaseq.go
index c0ad7c797..78ca983a8 100644
--- a/examples/kinaseq/kinaseq.go
+++ b/examples/kinaseq/kinaseq.go
@@ -8,23 +8,15 @@ import (
 	"fmt"
 	"math/rand"
 	"reflect"
+	"strings"
 
 	"cogentcore.org/core/math32"
 	"cogentcore.org/core/math32/minmax"
-	"cogentcore.org/core/tensor"
 	"cogentcore.org/core/tensor/stats/stats"
-	"github.com/emer/emergent/v2/decoder"
 	"github.com/emer/emergent/v2/elog"
 	"github.com/emer/emergent/v2/etime"
 )
 
-const (
-	NBins        = 20
-	CyclesPerBin = 10
-	NOutputs     = 3
-	NInputs      = NBins + 2 // per neuron
-)
-
 // KinaseNeuron has Neuron state
 type KinaseNeuron struct {
 	// Neuron spiking (0,1)
@@ -42,7 +34,7 @@ type KinaseNeuron struct {
 	TotalSpikes float32
 
 	// binned count of spikes, for regression learning
-	BinnedSpikes [NBins]float32
+	BinnedSpikes [4]float32
 }
 
 func (kn *KinaseNeuron) Init() {
@@ -58,13 +50,14 @@ func (kn *KinaseNeuron) StartTrial() {
 	for i := range kn.BinnedSpikes {
 		kn.BinnedSpikes[i] = 0
 	}
+	// kn.CaSyn = 0 // note: better fits with carryover
 }
 
 // Cycle does one cycle of neuron updating, with given exponential spike interval
 // based on target spiking firing rate.
 func (kn *KinaseNeuron) Cycle(expInt float32, params *ParamConfig, cyc int) {
 	kn.Spike = 0
-	bin := cyc / CyclesPerBin
+	bin := cyc / 50
 	if expInt > 0 {
 		kn.SpikeP *= rand.Float32()
 		if kn.SpikeP <= expInt {
@@ -143,8 +136,11 @@ type KinaseState struct {
 	// Standard synapse values
 	StdSyn KinaseSynapse
 
-	// Linearion synapse values
+	// Linear synapse values
 	LinearSyn KinaseSynapse
+
+	// binned integration of send, recv spikes
+	BinnedSums [4]float32
 }
 
 func (ks *KinaseState) Init() {
@@ -160,8 +156,6 @@ func (kn *KinaseState) StartTrial() {
 }
 
 func (ss *Sim) ConfigKinase() {
-	ss.Linear.Init(NOutputs, NInputs*2, 0, decoder.IdentityFunc)
-	ss.Linear.LRate = ss.Config.Params.LRate
 }
 
 // Sweep runs a sweep through minus-plus ranges
@@ -268,20 +262,14 @@ func (ss *Sim) TrialImpl(minusHz, plusHz float32) {
 	}
 	ks.StdSyn.DWt = ks.StdSyn.CaP - ks.StdSyn.CaD
 
-	ks.Send.SetInput(ss.Linear.Inputs, 0)
-	ks.Recv.SetInput(ss.Linear.Inputs, NInputs)
-	ss.Linear.Forward()
-	out := make([]float32, NOutputs)
-	ss.Linear.Output(&out)
-	ks.LinearSyn.CaM = out[0]
-	ks.LinearSyn.CaP = out[1]
-	ks.LinearSyn.CaD = out[2]
+	for i := range ks.BinnedSums {
+		ks.BinnedSums[i] = 0.1 * (ks.Recv.BinnedSpikes[i] * ks.Send.BinnedSpikes[i])
+	}
+
+	ss.CaParams.FinalCa(ks.BinnedSums[0], ks.BinnedSums[1], ks.BinnedSums[2], ks.BinnedSums[3], &ks.LinearSyn.CaM, &ks.LinearSyn.CaP, &ks.LinearSyn.CaD)
 	ks.LinearSyn.DWt = ks.LinearSyn.CaP - ks.LinearSyn.CaD
 
 	if ks.Train {
-		targ := [NOutputs]float32{ks.StdSyn.CaM, ks.StdSyn.CaP, ks.StdSyn.CaD}
-		sse, _ := ss.Linear.Train(targ[:])
-		ks.SSE = sse
 		ss.Logs.LogRow(etime.Train, etime.Cycle, 0)
 		ss.GUI.UpdatePlot(etime.Train, etime.Cycle)
 		ss.Logs.LogRow(etime.Train, etime.Trial, ks.Trial)
@@ -308,7 +296,6 @@ func (ss *Sim) Train() {
 		ss.Logs.LogRow(etime.Train, etime.Condition, ss.Kinase.Condition)
 		ss.GUI.UpdatePlot(etime.Train, etime.Condition)
 	}
-	tensor.SaveCSV(&ss.Linear.Weights, "trained.wts", '\t')
 }
 
 func (ss *Sim) ConfigKinaseLogItems() {
@@ -320,7 +307,7 @@ func (ss *Sim) ConfigKinaseLogItems() {
 	tn := len(times)
 	WalkFields(val,
 		func(parent reflect.Value, field reflect.StructField, value reflect.Value) bool {
-			if field.Name == "BinnedSpikes" {
+			if strings.HasPrefix(field.Name, "Binned") {
 				return false
 			}
 			return true
diff --git a/examples/kinaseq/neuron.go b/examples/kinaseq/neuron.go
index 24b4ade76..0e90c30b2 100644
--- a/examples/kinaseq/neuron.go
+++ b/examples/kinaseq/neuron.go
@@ -189,10 +189,10 @@ func (ss *Sim) NeuronUpdate(nt *axon.Network, inputOn bool) {
 	syni := uint32(0)
 	pj := ly.RcvPaths[0]
 
-	snCaSyn := pj.Params.Learn.KinaseCa.SpikeG * axon.NrnV(ctx, ni, di, axon.CaSyn)
+	snCaSyn := pj.Params.Learn.KinaseCa.CaScale * axon.NrnV(ctx, ni, di, axon.CaSyn)
 	pj.Params.SynCaSyn(ctx, syni, ri, di, snCaSyn, updtThr)
 
-	rnCaSyn := pj.Params.Learn.KinaseCa.SpikeG * axon.NrnV(ctx, ri, di, axon.CaSyn)
+	rnCaSyn := pj.Params.Learn.KinaseCa.CaScale * axon.NrnV(ctx, ri, di, axon.CaSyn)
 	if axon.NrnV(ctx, si, di, axon.Spike) <= 0 { // NOT already handled in send version
 		pj.Params.SynCaSyn(ctx, syni, si, di, rnCaSyn, updtThr)
 	}
diff --git a/examples/kinaseq/sim.go b/examples/kinaseq/sim.go
index a45af3426..b5b0f2d14 100644
--- a/examples/kinaseq/sim.go
+++ b/examples/kinaseq/sim.go
@@ -56,7 +56,7 @@ type Sim struct {
 	Config Config
 
 	// Kinase SynCa params
-	CaParams kinase.CaParams
+	CaParams kinase.SynCaParams
 
 	// Kinase state
 	Kinase KinaseState
diff --git a/examples/objrec/params.go b/examples/objrec/params.go
index 29465c105..b60798c2d 100644
--- a/examples/objrec/params.go
+++ b/examples/objrec/params.go
@@ -69,6 +69,7 @@ var ParamSets = netparams.Sets{
 			}},
 		{Sel: "Path", Desc: "yes extra learning factors",
 			Params: params.Params{
+				"Path.Learn.Trace.SynCa":   "LinearSynCa",
 				"Path.Learn.LRate.Base":    "0.2",    // 0.4 for NeuronCa; 0.2 best, 0.1 nominal
 				"Path.Learn.Trace.SubMean": "1",      // 1 -- faster if 0 until 20 epc -- prevents sig amount of late deterioration
 				"Path.SWts.Adapt.LRate":    "0.0001", // 0.005 == .1 == .01
diff --git a/examples/ra25/params.go b/examples/ra25/params.go
index 6f86b9a94..e4a224be3 100644
--- a/examples/ra25/params.go
+++ b/examples/ra25/params.go
@@ -41,10 +41,11 @@ var ParamSets = netparams.Sets{
 			}},
 		{Sel: "Path", Desc: "basic path params",
 			Params: params.Params{
-				"Path.Learn.LRate.Base":    "0.1", // 0.1 learns fast but dies early, .02 is stable long term
-				"Path.SWts.Adapt.LRate":    "0.1", // .1 >= .2,
-				"Path.SWts.Init.SPct":      "0.5", // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
-				"Path.Learn.Trace.SubMean": "0",   // 1 > 0 for long run stability
+				"Path.Learn.Trace.SynCa":   "LinearSynCa",
+				"Path.Learn.LRate.Base":    "0.05", // 0.1 learns fast but dies early, .02 is stable long term
+				"Path.SWts.Adapt.LRate":    "0.1",  // .1 >= .2,
+				"Path.SWts.Init.SPct":      "0.5",  // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
+				"Path.Learn.Trace.SubMean": "0",    // 1 > 0 for long run stability
 			}},
 		{Sel: ".BackPath", Desc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
 			Params: params.Params{
diff --git a/kinase/params.go b/kinase/params.go
index dae3dd5cb..488b7b3f5 100644
--- a/kinase/params.go
+++ b/kinase/params.go
@@ -240,7 +240,7 @@ func (kp *SynCaParams) CurCa(ctime, utime float32, caM, caP, caD *float32) {
 
 // FinalCa uses a linear regression to compute the final Ca values
 func (kp *SynCaParams) FinalCa(bin0, bin1, bin2, bin3 float32, caM, caP, caD *float32) {
-	if bin0+bin1+bin2+bin3 < 0.1 {
+	if bin0+bin1+bin2+bin3 < 0.01 {
 		*caM = 0
 		*caP = 0
 		*caD = 0
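
Taken together, the LinearSynCa case in DWtSynCortex and the updated TrialImpl in examples/kinaseq both reduce the per-synapse calcium computation to four binned send*recv spike products plus one call to the fitted regression in kinase.SynCaParams.FinalCa, which is also why DoSynCa now returns false for anything other than StdSynCa. A rough standalone sketch of that flow, not part of the patch: it assumes the github.com/emer/axon/v2/kinase import path, a Defaults() initializer on SynCaParams (not shown in this diff), and made-up per-bin spike counts standing in for the SpkBin0..SpkBin3 neuron variables.

```go
package main

import (
	"fmt"

	"github.com/emer/axon/v2/kinase"
)

func main() {
	var kp kinase.SynCaParams
	kp.Defaults() // assumed initializer; not shown in this patch

	// hypothetical per-bin spike counts for sender and receiver over one theta cycle
	send := [4]float32{3, 4, 5, 6}
	recv := [4]float32{2, 2, 4, 5}

	// co-spiking product per bin, scaled by 0.1 as in DWtSynCortex / TrialImpl
	var b [4]float32
	for i := range b {
		b[i] = 0.1 * (send[i] * recv[i])
	}

	// fitted linear regression maps the four bins onto CaM, CaP, CaD;
	// bin sums below 0.01 are zeroed out (threshold lowered from 0.1 in this diff)
	var caM, caP, caD float32
	kp.FinalCa(b[0], b[1], b[2], b[3], &caM, &caP, &caD)
	fmt.Println(caP - caD) // the credit-assignment term: CaP - CaD
}
```

The payoff is that synapse-level Ca no longer has to be integrated every cycle for cortical pathways, matching the "much faster" note on the LinearSynCa enum value.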