diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md
index 2d760c55..8acd8a98 100644
--- a/lectures/dynamic_programming/coleman_policy_iter.md
+++ b/lectures/dynamic_programming/coleman_policy_iter.md
@@ -404,7 +404,7 @@ function K!(Kg, g, grid, beta, dudc, f, f_prime, shocks)
     for (i, y) in enumerate(grid)
         function h(c)
            vals = dudc.(g_func.(f(y - c) * shocks)) .* f_prime(y - c) .* shocks
-           return dudc * c - beta * mean(vals)
+           return dudc(c) - beta * mean(vals)
        end
        Kg[i] = find_zero(h, (1e-10, y - 1e-10))
    end
diff --git a/lectures/dynamic_programming/egm_policy_iter.md b/lectures/dynamic_programming/egm_policy_iter.md
index 6036bf7e..68fb862a 100644
--- a/lectures/dynamic_programming/egm_policy_iter.md
+++ b/lectures/dynamic_programming/egm_policy_iter.md
@@ -216,7 +216,7 @@ The first step is to bring in the model that we used in the {doc}`Coleman policy

 ```{code-cell} julia
 # model
-function Model(alpha = 0.65, # productivity parameter
+function Model(;alpha = 0.65, # productivity parameter
                beta = 0.95, # discount factor
                gamma = 1.0, # risk aversion
                mu = 0.0, # lognorm(mu, sigma)