diff --git a/docs/src/tutorials/spectralDCM.jl b/docs/src/tutorials/spectralDCM.jl index d92c9560..b1781f0b 100644 --- a/docs/src/tutorials/spectralDCM.jl +++ b/docs/src/tutorials/spectralDCM.jl @@ -72,7 +72,7 @@ end # ## Run the simulation and plot the results -# setup simulation of the model, time in milliseconds +# setup simulation of the model, time in seconds tspan = (0.0, 512.0) prob = SDEProblem(simmodel, [], tspan) dt = 2 # 2 seconds (units are milliseconds) as measurement interval for fMRI @@ -155,7 +155,7 @@ for (i, idx) in enumerate(CartesianIndices(A_prior)) end # we avoid simplification of the model in order to exclude some parameters from fitting @named fitmodel = system_from_graph(g, simplify=false) -# With the function `changetune` we can provide a dictionary of parameters whose tunable flag should be changed, for instance set to false to exclude them from the optimizatoin procedure. +# With the function `changetune` we can provide a dictionary of parameters whose tunable flag should be changed, for instance set to false to exclude them from the optimization procedure. # For instance the the effective connections that are set to zero in the simulation: untune = Dict(A[3] => false, A[7] => false) fitmodel = changetune(fitmodel, untune) # 3 and 7 are not present in the simulation model diff --git a/src/datafitting/spDCM_VL.jl b/src/datafitting/spDCM_VL.jl index 75d2cfd2..3594d92e 100644 --- a/src/datafitting/spDCM_VL.jl +++ b/src/datafitting/spDCM_VL.jl @@ -311,7 +311,7 @@ function integration_step(dfdx, f, v, solenoid=false) Q = Q/opnorm(Q, 2)/8; f = f - Q*f; - dfdx = dfdx - Q*dfdx; + dfdx = dfdx - Q*dfdx; end # (expm(dfdx*t) - I)*inv(dfdx)*f ~~~ could also be done with expv (expv(t, dFdθθ, dFdθθ \ dFdθ) - dFdθθ \ dFdθ) but doesn't work with Dual. 
@@ -320,9 +320,9 @@ function integration_step(dfdx, f, v, solenoid=false) t = exp(v - spm_logdet(dfdx)/n) if t > exp(16) - dx = - dfdx \ f # -inv(dfdx)*f + dx = - dfdx \ f # -inv(dfdx)*f else - dx = (exp(t * dfdx) - I) * inv(dfdx)*f # (expm(dfdx*t) - I)*inv(dfdx)*f + dx = (exp(t * dfdx) - I) * inv(dfdx)*f # (expm(dfdx*t) - I)*inv(dfdx)*f end return dx