From 43460d8d5f6d92a1ff0563cda4e7e08c9f390d10 Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Tue, 29 Aug 2023 10:08:25 -0400 Subject: [PATCH 01/19] fixed_with_kw --- lectures/dynamic_programming/coleman_policy_iter.md | 4 ++-- lectures/dynamic_programming/discrete_dp.md | 2 +- lectures/dynamic_programming/egm_policy_iter.md | 3 ++- lectures/dynamic_programming/mccall_model.md | 2 +- .../mccall_model_with_separation.md | 3 ++- lectures/dynamic_programming/optgrowth.md | 4 ++-- lectures/dynamic_programming/smoothing.md | 10 +++++----- lectures/dynamic_programming/wald_friedman.md | 4 ++-- 8 files changed, 17 insertions(+), 15 deletions(-) diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index 71faf93b..95c3d181 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -464,7 +464,7 @@ Here's an object containing data from the log-linear growth model we used in the ```{code-cell} julia isoelastic(c, γ) = isone(γ) ? log(c) : (c^(1 - γ) - 1) / (1 - γ) -Model = @with_kw (α = 0.65, # Productivity parameter +Model(α = 0.65, # Productivity parameter β = 0.95, # Discount factor γ = 1.0, # Risk aversion μ = 0.0, # First parameter in lognorm(μ, σ) @@ -477,7 +477,7 @@ Model = @with_kw (α = 0.65, # Productivity parameter ∂u∂c = c -> c^(-γ), # u′ f = k -> k^α, # production function f′ = k -> α * k^(α - 1), # f′ - ) + ) = (;α,β,γ ,μ,s,grid,grid_min,grid_max,grid_size,u,∂u∂c,f,f′) ``` Next we generate an instance diff --git a/lectures/dynamic_programming/discrete_dp.md b/lectures/dynamic_programming/discrete_dp.md index 3ef0a180..bb02a9b9 100644 --- a/lectures/dynamic_programming/discrete_dp.md +++ b/lectures/dynamic_programming/discrete_dp.md @@ -432,7 +432,7 @@ using BenchmarkTools, Plots, QuantEcon, Parameters ``` ```{code-cell} julia -SimpleOG = @with_kw (B = 10, M = 5, α = 0.5, β = 0.9) +SimpleOG(B = 10, M = 5, α = 0.5, β = 0.9)= (;B, M , α , β ) function transition_matrices(g) (;B, M, α, β) = g diff --git a/lectures/dynamic_programming/egm_policy_iter.md b/lectures/dynamic_programming/egm_policy_iter.md index 404901c8..55a679d6 100644 --- a/lectures/dynamic_programming/egm_policy_iter.md +++ b/lectures/dynamic_programming/egm_policy_iter.md @@ -215,7 +215,7 @@ The first step is to bring in the model that we used in the {doc}`Coleman policy ```{code-cell} julia # model -Model = @with_kw (α = 0.65, # productivity parameter +Model(α = 0.65, # productivity parameter β = 0.95, # discount factor γ = 1.0, # risk aversion μ = 0.0, # lognorm(μ, σ) @@ -228,6 +228,7 @@ Model = @with_kw (α = 0.65, # productivity parameter f = k-> k^α, # production function f′ = k -> α*k^(α-1), # f' grid = range(grid_min, grid_max, length = grid_size)) # grid + = (;α,β ,γ,μ ,s ,grid_min,grid_max ,grid_size ,u,u′ ,f ,f′,grid ) ``` Next we generate an instance diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index f18aaa0d..e9125c07 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm = @with_kw (c=25.0, β=0.99, w=w) # named tuples +mcm (c=25.0, β=0.99, w=w) = (;c, β, w )# named tuples compute_reservation_wage(mcm()) # call with default parameters 
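# A keyword-argument variant of this constructor pattern (an illustrative sketch,
# not taken from the lecture code) would be
#     mcm(; c = 25.0, β = 0.99) = (; c, β)
# so that `mcm()` returns the named tuple with all defaults, while a call such as
# `mcm(c = 30.0)` overrides one field and keeps the remaining defaults.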
``` diff --git a/lectures/dynamic_programming/mccall_model_with_separation.md b/lectures/dynamic_programming/mccall_model_with_separation.md index 37e9f3ab..5751bb94 100644 --- a/lectures/dynamic_programming/mccall_model_with_separation.md +++ b/lectures/dynamic_programming/mccall_model_with_separation.md @@ -287,7 +287,7 @@ We'll use the default parameterizations found in the code above. u(c, σ) = (c^(1 - σ) - 1) / (1 - σ) # model constructor -McCallModel = @with_kw (α = 0.2, +McCallModel(α = 0.2, β = 0.98, # discount rate γ = 0.7, c = 6.0, # unemployment compensation @@ -295,6 +295,7 @@ McCallModel = @with_kw (α = 0.2, u = u, # utility function w = range(10, 20, length = 60), # wage values dist = BetaBinomial(59, 600, 400)) # distribution over wage values + = (;α, β , γ ,c , σ,u , w , dist) ``` ```{code-cell} julia diff --git a/lectures/dynamic_programming/optgrowth.md b/lectures/dynamic_programming/optgrowth.md index 73dc67d1..9ae8e6cc 100644 --- a/lectures/dynamic_programming/optgrowth.md +++ b/lectures/dynamic_programming/optgrowth.md @@ -565,11 +565,11 @@ In addition to the model parameters, we need a grid and some shock draws for Mon Random.seed!(42) # for reproducible results u(c;p) = log(c) # utility f(k;p) = k^p.α # deterministic part of production function -OptimalGrowthModel = @with_kw (α = 0.4, β = 0.96, μ = 0.0, s = 0.1, +OptimalGrowthModel(α = 0.4, β = 0.96, μ = 0.0, s = 0.1, u = u, f = f, # defaults defined above y = range(1e-5, 4.0, length = 200), # grid on y ξ = exp.(μ .+ s * randn(250)) # monte carlo shocks -) # named tuples defaults +) = (; α, β, μ, s , u , f , y , ξ ) # named tuples defaults # True value and policy function function v_star(y;p) diff --git a/lectures/dynamic_programming/smoothing.md b/lectures/dynamic_programming/smoothing.md index 7375ce8f..8e552093 100644 --- a/lectures/dynamic_programming/smoothing.md +++ b/lectures/dynamic_programming/smoothing.md @@ -361,11 +361,11 @@ using Parameters, Plots, QuantEcon, Random ``` ```{code-cell} julia -ConsumptionProblem = @with_kw (β = 0.96, - y = [2.0, 1.5], - b0 = 3.0, - P = [0.8 0.2 - 0.4 0.6]) +ConsumptionProblem(β = 0.96, + y = [2.0, 1.5], + b0 = 3.0, + P = [0.8 0.2 + 0.4 0.6]) = (; β ,y,b0,P) function consumption_complete(cp) diff --git a/lectures/dynamic_programming/wald_friedman.md b/lectures/dynamic_programming/wald_friedman.md index 2f1aa299..852babe9 100644 --- a/lectures/dynamic_programming/wald_friedman.md +++ b/lectures/dynamic_programming/wald_friedman.md @@ -504,10 +504,10 @@ function simulation(problem) return return_output ? 
(α, β, outcomes, costs, trials) : nothing end -Problem = @with_kw (d0 = Beta(1,1), d1 = Beta(9,9), +Problem(d0 = Beta(1,1), d1 = Beta(9,9), L0 = 2, L1 = 2, c = 0.2, p = 0.5, - n = 100, return_output = false); + n = 100, return_output = false) = (;d0,d1,L0,L1,c,p,n,return_output ); ``` ```{code-cell} julia From 5ee0a244861f72bcb490aefa9767c20345ca20e7 Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Tue, 29 Aug 2023 10:34:00 -0400 Subject: [PATCH 02/19] formating --- lectures/dynamic_programming/career.md | 90 +++++------ .../coleman_policy_iter.md | 108 ++++++------- lectures/dynamic_programming/discrete_dp.md | 104 ++++++------ lectures/dynamic_programming/ifp.md | 38 ++--- lectures/dynamic_programming/jv.md | 150 +++++++++--------- lectures/dynamic_programming/lqcontrol.md | 111 ++++++------- lectures/dynamic_programming/odu.md | 128 +++++++-------- lectures/dynamic_programming/optgrowth.md | 101 ++++++------ lectures/dynamic_programming/perm_income.md | 46 +++--- .../dynamic_programming/perm_income_cons.md | 126 +++++++-------- lectures/dynamic_programming/robustness.md | 71 ++++----- lectures/dynamic_programming/smoothing.md | 93 ++++++----- lectures/dynamic_programming/wald_friedman.md | 91 ++++++----- 13 files changed, 635 insertions(+), 622 deletions(-) diff --git a/lectures/dynamic_programming/career.md b/lectures/dynamic_programming/career.md index 5ec9cad2..339ca17c 100644 --- a/lectures/dynamic_programming/career.md +++ b/lectures/dynamic_programming/career.md @@ -134,7 +134,7 @@ $$ Interpretation: -* draw $q$ from a β distribution with shape parameters $(a, b)$ +* draw $q$ from a beta distribution with shape parameters $(a, b)$ * run $n$ independent binary trials, each with success probability $q$ * $p(k \,|\, n, a, b)$ is the probability of $k$ successes in these $n$ trials @@ -156,7 +156,6 @@ using Test ```{code-cell} julia using LaTeXStrings, Plots, QuantEcon, Distributions - n = 50 a_vals = [0.5, 1, 100] b_vals = [0.5, 1, 100] @@ -182,22 +181,22 @@ Implementation: The code for solving the DP problem described above is found below: ```{code-cell} julia -function CareerWorkerProblem(;β = 0.95, +function CareerWorkerProblem(; beta = 0.95, B = 5.0, N = 50, F_a = 1.0, F_b = 1.0, G_a = 1.0, G_b = 1.0) - θ = range(0, B, length = N) - ϵ = copy(θ) - dist_F = BetaBinomial(N-1, F_a, F_b) - dist_G = BetaBinomial(N-1, G_a, G_b) + theta = range(0, B, length = N) + epsilon = copy(theta) + dist_F = BetaBinomial(N - 1, F_a, F_b) + dist_G = BetaBinomial(N - 1, G_a, G_b) F_probs = pdf.(dist_F, support(dist_F)) G_probs = pdf.(dist_G, support(dist_G)) - F_mean = sum(θ .* F_probs) - G_mean = sum(ϵ .* G_probs) - return (β = β, N = N, B = B, θ = θ, ϵ = ϵ, + F_mean = sum(theta .* F_probs) + G_mean = sum(epsilon .* G_probs) + return (beta = beta, N = N, B = B, theta = theta, epsilon = epsilon, F_probs = F_probs, G_probs = G_probs, F_mean = F_mean, G_mean = G_mean) end @@ -206,17 +205,15 @@ function update_bellman!(cp, v, out; ret_policy = false) # new life. This is a function of the distribution parameters and is # always constant. 
No need to recompute it in the loop - v3 = (cp.G_mean + cp.F_mean .+ cp.β .* - cp.F_probs' * v * cp.G_probs)[1] # do not need 1 element array + v3 = (cp.G_mean + cp.F_mean .+ cp.beta .* cp.F_probs' * v * cp.G_probs)[1] # do not need 1 element array - for j in 1:cp.N - for i in 1:cp.N + for j in 1:(cp.N) + for i in 1:(cp.N) # stay put - v1 = cp.θ[i] + cp.ϵ[j] + cp.β * v[i, j] + v1 = cp.theta[i] + cp.epsilon[j] + cp.beta * v[i, j] # new job - v2 = (cp.θ[i] .+ cp.G_mean .+ cp.β .* - v[i, :]' * cp.G_probs)[1] # do not need a single element array + v2 = (cp.theta[i] .+ cp.G_mean .+ cp.beta .* v[i, :]' * cp.G_probs)[1] # do not need a single element array if ret_policy if v1 > max(v2, v3) @@ -234,7 +231,6 @@ function update_bellman!(cp, v, out; ret_policy = false) end end - function update_bellman(cp, v; ret_policy = false) out = similar(v) update_bellman!(cp, v, out, ret_policy = ret_policy) @@ -274,8 +270,9 @@ v_init = fill(100.0, wp.N, wp.N) func(x) = update_bellman(wp, x) v = compute_fixed_point(func, v_init, max_iter = 500, verbose = false) -plot(linetype = :surface, wp.θ, wp.ϵ, transpose(v), xlabel=L"\theta", ylabel=L"\epsilon", - seriescolor=:plasma, gridalpha = 1) +plot(linetype = :surface, wp.theta, wp.epsilon, transpose(v), xlabel = L"\theta", + ylabel = L"\epsilon", + seriescolor = :plasma, gridalpha = 1) ``` The optimal policy can be represented as follows (see {ref}`Exercise 3 ` for code). @@ -373,31 +370,31 @@ G = DiscreteRV(wp.G_probs) function gen_path(T = 20) i = j = 1 - θ_ind = Int[] - ϵ_ind = Int[] + theta_ind = Int[] + epsilon_ind = Int[] - for t=1:T + for t in 1:T # do nothing if stay put if optimal_policy[i, j] == 2 # new job j = rand(G)[1] elseif optimal_policy[i, j] == 3 # new life i, j = rand(F)[1], rand(G)[1] end - push!(θ_ind, i) - push!(ϵ_ind, j) + push!(theta_ind, i) + push!(epsilon_ind, j) end - return wp.θ[θ_ind], wp.ϵ[ϵ_ind] + return wp.theta[theta_ind], wp.epsilon[epsilon_ind] end plot_array = Any[] for i in 1:2 - θ_path, ϵ_path = gen_path() - plt = plot(ϵ_path, label=L"\epsilon") - plot!(plt, θ_path, label=L"\theta") - plot!(plt, legend=:bottomright) + theta_path, epsilon_path = gen_path() + plt = plot(epsilon_path, label = L"\epsilon") + plot!(plt, theta_path, label = L"\theta") + plot!(plt, legend = :bottomright) push!(plot_array, plt) end -plot(plot_array..., layout = (2,1)) +plot(plot_array..., layout = (2, 1)) ``` ```{code-cell} julia @@ -430,7 +427,6 @@ function gen_first_passage_time(optimal_policy) end end - M = 25000 samples = zeros(M) for i in 1:M @@ -448,12 +444,12 @@ tags: [remove-cell] end ``` -To compute the median with $\beta=0.99$ instead of the default value $\beta=0.95$, replace `wp=CareerWorkerProblem()` with `wp=CareerWorkerProblem(β=0.99)`. +To compute the median with $\beta=0.99$ instead of the default value $\beta=0.95$, replace `wp=CareerWorkerProblem()` with `wp=CareerWorkerProblem(beta=0.99)`. The medians are subject to randomness, but should be about 7 and 14 respectively. Not surprisingly, more patient workers will wait longer to settle down to their final job. 
```{code-cell} julia -wp2 = CareerWorkerProblem(β=0.99) +wp2 = CareerWorkerProblem(beta = 0.99) v2, optimal_policy2 = solve_wp(wp2) @@ -485,30 +481,30 @@ lvls = [0.5, 1.5, 2.5, 3.5] x_grid = range(0, 5, length = 50) y_grid = range(0, 5, length = 50) -contour(x_grid, y_grid, optimal_policy', fill=true, levels=lvls,color = :Blues, - fillalpha=1, cbar = false) -contour!(xlabel=L"\theta", ylabel=L"\epsilon") -annotate!([(1.8,2.5, text("new life", 14, :white, :center))]) -annotate!([(4.5,2.5, text("new job", 14, :center))]) -annotate!([(4.0,4.5, text("stay put", 14, :center))]) +contour(x_grid, y_grid, optimal_policy', fill = true, levels = lvls, color = :Blues, + fillalpha = 1, cbar = false) +contour!(xlabel = L"\theta", ylabel = L"\epsilon") +annotate!([(1.8, 2.5, text("new life", 14, :white, :center))]) +annotate!([(4.5, 2.5, text("new job", 14, :center))]) +annotate!([(4.0, 4.5, text("stay put", 14, :center))]) ``` Now, we need only swap out for the new parameters ```{code-cell} julia -wp = CareerWorkerProblem(G_a=100.0, G_b=100.0); # use new params +wp = CareerWorkerProblem(G_a = 100.0, G_b = 100.0); # use new params v, optimal_policy = solve_wp(wp) lvls = [0.5, 1.5, 2.5, 3.5] x_grid = range(0, 5, length = 50) y_grid = range(0, 5, length = 50) -contour(x_grid, y_grid, optimal_policy', fill=true, levels=lvls,color = :Blues, - fillalpha=1, cbar = false) -contour!(xlabel=L"\theta", ylabel=L"\epsilon") -annotate!([(1.8,2.5, text("new life", 14, :white, :center))]) -annotate!([(4.5,2.5, text("new job", 14, :center))]) -annotate!([(4.0,4.5, text("stay put", 14, :center))]) +contour(x_grid, y_grid, optimal_policy', fill = true, levels = lvls, color = :Blues, + fillalpha = 1, cbar = false) +contour!(xlabel = L"\theta", ylabel = L"\epsilon") +annotate!([(1.8, 2.5, text("new life", 14, :white, :center))]) +annotate!([(4.5, 2.5, text("new job", 14, :center))]) +annotate!([(4.0, 4.5, text("stay put", 14, :center))]) ``` You will see that the region for which the worker diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index 95c3d181..3fd1968c 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -394,17 +394,17 @@ using BenchmarkTools, Interpolations, Parameters, Plots, Roots ``` ```{code-cell} julia -function K!(Kg, g, grid, β, ∂u∂c, f, f′, shocks) -# This function requires the container of the output value as argument Kg +function K!(Kg, g, grid, beta, ∂u∂c, f, f′, shocks) + # This function requires the container of the output value as argument Kg # Construct linear interpolation object - g_func = LinearInterpolation(grid, g, extrapolation_bc=Line()) + g_func = LinearInterpolation(grid, g, extrapolation_bc = Line()) # solve for updated consumption value for (i, y) in enumerate(grid) function h(c) vals = ∂u∂c.(g_func.(f(y - c) * shocks)) .* f′(y - c) .* shocks - return ∂u∂c(c) - β * mean(vals) + return ∂u∂c(c) - beta * mean(vals) end Kg[i] = find_zero(h, (1e-10, y - 1e-10)) end @@ -412,8 +412,7 @@ function K!(Kg, g, grid, β, ∂u∂c, f, f′, shocks) end # The following function does NOT require the container of the output value as argument -K(g, grid, β, ∂u∂c, f, f′, shocks) = - K!(similar(g), g, grid, β, ∂u∂c, f, f′, shocks) +K(g, grid, beta, ∂u∂c, f, f′, shocks) = K!(similar(g), g, grid, beta, ∂u∂c, f, f′, shocks) ``` It has some similarities to the code for the Bellman operator in our {doc}`optimal growth lecture <../dynamic_programming/optgrowth>`. 
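Before moving on, here is a rough usage sketch of the operator just defined. It assumes the `K` function above and the `Model` constructor defined below in this lecture are in scope; the number of Monte Carlo draws is illustrative rather than taken from the lecture.

```julia
# apply the Coleman operator once, starting from the "consume everything" policy c(y) = y
m = Model()                                  # default parameters
shocks = exp.(m.mu .+ m.s .* randn(250))     # illustrative Monte Carlo shock draws
g0 = collect(m.grid)                         # initial policy guess: c(y) = y
g1 = K(g0, m.grid, m.beta, m.∂u∂c, m.f, m.f′, shocks)
```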
@@ -425,29 +424,29 @@ Here's that Bellman operator code again, which needs to be executed because we'l ```{code-cell} julia using Optim -function T(w, grid, β, u, f, shocks, Tw = similar(w); - compute_policy = false) +function T(w, grid, beta, u, f, shocks, Tw = similar(w); + compute_policy = false) # apply linear interpolation to w - w_func = LinearInterpolation(grid, w, extrapolation_bc=Line()) + w_func = LinearInterpolation(grid, w, extrapolation_bc = Line()) if compute_policy - σ = similar(w) + sigma = similar(w) end - # set Tw[i] = max_c { u(c) + β E w(f(y - c) z)} + # set Tw[i] = max_c { u(c) + beta E w(f(y - c) z)} for (i, y) in enumerate(grid) - objective(c) = u(c) + β * mean(w_func.(f(y - c) .* shocks)) + objective(c) = u(c) + beta * mean(w_func.(f(y - c) .* shocks)) res = maximize(objective, 1e-10, y) if compute_policy - σ[i] = Optim.maximizer(res) + sigma[i] = Optim.maximizer(res) end Tw[i] = Optim.maximum(res) end if compute_policy - return Tw, σ + return Tw, sigma else return Tw end @@ -463,21 +462,22 @@ solution. Here's an object containing data from the log-linear growth model we used in the {doc}`value function iteration lecture <../dynamic_programming/optgrowth>` ```{code-cell} julia -isoelastic(c, γ) = isone(γ) ? log(c) : (c^(1 - γ) - 1) / (1 - γ) -Model(α = 0.65, # Productivity parameter - β = 0.95, # Discount factor - γ = 1.0, # Risk aversion - μ = 0.0, # First parameter in lognorm(μ, σ) - s = 0.1, # Second parameter in lognorm(μ, σ) - grid = range(1e-6, 4, length = 200), # Grid - grid_min = 1e-6, # Smallest grid point - grid_max = 4.0, # Largest grid point - grid_size = 200, # Number of grid points - u = (c, γ = γ) -> isoelastic(c, γ), # utility function - ∂u∂c = c -> c^(-γ), # u′ - f = k -> k^α, # production function - f′ = k -> α * k^(α - 1), # f′ - ) = (;α,β,γ ,μ,s,grid,grid_min,grid_max,grid_size,u,∂u∂c,f,f′) +isoelastic(c, gamma) = isone(gamma) ? log(c) : (c^(1 - gamma) - 1) / (1 - gamma) +function Model(alpha = 0.65, # Productivity parameter + beta = 0.95, # Discount factor + gamma = 1.0, # Risk aversion + mu = 0.0, # First parameter in lognorm(mu, sigma) + s = 0.1, # Second parameter in lognorm(mu, sigma) + grid = range(1e-6, 4, length = 200), # Grid + grid_min = 1e-6, # Smallest grid point + grid_max = 4.0, # Largest grid point + grid_size = 200, # Number of grid points + u = (c, gamma = gamma) -> isoelastic(c, gamma), # utility function + ∂u∂c = c -> c^(-gamma), # u′ + f = k -> k^alpha, # production function + f′ = k -> alpha * k^(alpha - 1)) + (; alpha, beta, gamma, mu, s, grid, grid_min, grid_max, grid_size, u, ∂u∂c, f, f′) +end ``` Next we generate an instance @@ -493,7 +493,7 @@ using Random Random.seed!(42) # for reproducible results. 
shock_size = 250 # number of shock draws in Monte Carlo integral -shocks = collect(exp.(m.μ .+ m.s * randn(shock_size))); # generate shocks +shocks = collect(exp.(m.mu .+ m.s * randn(shock_size))); # generate shocks ``` As a preliminary test, let's see if $K c^* = c^*$, as implied by the @@ -512,8 +512,8 @@ end ```{code-cell} julia function verify_true_policy(m, shocks, c_star) # compute (Kc_star) - (;grid, β, ∂u∂c, f, f′) = m - c_star_new = K(c_star, grid, β, ∂u∂c, f, f′, shocks) + (; grid, beta, ∂u∂c, f, f′) = m + c_star_new = K(c_star, grid, beta, ∂u∂c, f, f′, shocks) # plot c_star and Kc_star plot(grid, c_star, label = L"optimal policy $c^*$") @@ -523,7 +523,7 @@ end ``` ```{code-cell} julia -c_star = (1 - m.α * m.β) * m.grid # true policy (c_star) +c_star = (1 - m.alpha * m.beta) * m.grid # true policy (c_star) verify_true_policy(m, shocks, c_star) ``` @@ -548,11 +548,11 @@ The initial condition we'll use is the one that eats the whole pie: $c(y) = y$ ```{code-cell} julia function check_convergence(m, shocks, c_star, g_init; n_iter = 15) - (;grid, β, ∂u∂c, f, f′) = m - g = g_init; + (; grid, beta, ∂u∂c, f, f′) = m + g = g_init plot(m.grid, g, lw = 2, alpha = 0.6, label = L"intial condition $c(y) = y$") for i in 1:n_iter - new_g = K(g, grid, β, ∂u∂c, f, f′, shocks) + new_g = K(g, grid, beta, ∂u∂c, f, f′, shocks) g = new_g plot!(grid, g, lw = 2, alpha = 0.6, label = "") end @@ -585,7 +585,7 @@ discussed above ```{code-cell} julia function iterate_updating(func, arg_init; sim_length = 20) - arg = arg_init; + arg = arg_init for i in 1:sim_length new_arg = func(arg) arg = new_arg @@ -594,16 +594,16 @@ function iterate_updating(func, arg_init; sim_length = 20) end function compare_error(m, shocks, g_init, w_init; sim_length = 20) - (;grid, β, u, ∂u∂c, f, f′) = m + (; grid, beta, u, ∂u∂c, f, f′) = m g, w = g_init, w_init # two functions for simplification - bellman_single_arg(w) = T(w, grid, β, u, f, shocks) - coleman_single_arg(g) = K(g, grid, β, ∂u∂c, f, f′, shocks) + bellman_single_arg(w) = T(w, grid, beta, u, f, shocks) + coleman_single_arg(g) = K(g, grid, beta, ∂u∂c, f, f′, shocks) g = iterate_updating(coleman_single_arg, grid, sim_length = 20) w = iterate_updating(bellman_single_arg, u.(grid), sim_length = 20) - new_w, vf_g = T(w, grid, β, u, f, shocks, compute_policy = true) + new_w, vf_g = T(w, grid, beta, u, f, shocks, compute_policy = true) pf_error = c_star - g vf_error = c_star - vf_g @@ -616,7 +616,7 @@ end ``` ```{code-cell} julia -compare_error(m, shocks, m.grid, m.u.(m.grid), sim_length=20) +compare_error(m, shocks, m.grid, m.u.(m.grid), sim_length = 20) ``` As you can see, time iteration is much more accurate for a given @@ -735,22 +735,22 @@ Here's the code, which will execute if you've run all the code above ```{code-cell} julia # Model instance with risk aversion = 1.5 # others are the same as the previous instance -m_ex = Model(γ = 1.5); +m_ex = Model(gamma = 1.5); ``` ```{code-cell} julia -function exercise2(m, shocks, g_init = m.grid, w_init = m.u.(m.grid); sim_length = 20) - - (;grid, β, u, ∂u∂c, f, f′) = m +function exercise2(m, shocks, g_init = m.grid, w_init = m.u.(m.grid); + sim_length = 20) + (; grid, beta, u, ∂u∂c, f, f′) = m # initial policy and value g, w = g_init, w_init # iteration - bellman_single_arg(w) = T(w, grid, β, u, f, shocks) - coleman_single_arg(g) = K(g, grid, β, ∂u∂c, f, f′, shocks) + bellman_single_arg(w) = T(w, grid, beta, u, f, shocks) + coleman_single_arg(g) = K(g, grid, beta, ∂u∂c, f, f′, shocks) g = iterate_updating(coleman_single_arg, 
grid, sim_length = 20) w = iterate_updating(bellman_single_arg, u.(m.grid), sim_length = 20) - new_w, vf_g = T(w, grid, β, u, f, shocks, compute_policy = true) + new_w, vf_g = T(w, grid, beta, u, f, shocks, compute_policy = true) plot(grid, g, lw = 2, alpha = 0.6, label = "policy iteration") plot!(grid, vf_g, lw = 2, alpha = 0.6, label = "value iteration") @@ -759,7 +759,7 @@ end ``` ```{code-cell} julia -exercise2(m_ex, shocks, m.grid, m.u.(m.grid), sim_length=20) +exercise2(m_ex, shocks, m.grid, m.u.(m.grid), sim_length = 20) ``` The policies are indeed close. @@ -772,13 +772,13 @@ It assumes that you've just run the code from the previous exercise ```{code-cell} julia function bellman(m, shocks) - (;grid, β, u, ∂u∂c, f, f′) = m - bellman_single_arg(w) = T(w, grid, β, u, f, shocks) + (; grid, beta, u, ∂u∂c, f, f′) = m + bellman_single_arg(w) = T(w, grid, beta, u, f, shocks) iterate_updating(bellman_single_arg, u.(grid), sim_length = 20) end function coleman(m, shocks) - (;grid, β, ∂u∂c, f, f′) = m - coleman_single_arg(g) = K(g, grid, β, ∂u∂c, f, f′, shocks) + (; grid, beta, ∂u∂c, f, f′) = m + coleman_single_arg(g) = K(g, grid, beta, ∂u∂c, f, f′, shocks) iterate_updating(coleman_single_arg, grid, sim_length = 20) end ``` diff --git a/lectures/dynamic_programming/discrete_dp.md b/lectures/dynamic_programming/discrete_dp.md index bb02a9b9..faf2ee3f 100644 --- a/lectures/dynamic_programming/discrete_dp.md +++ b/lectures/dynamic_programming/discrete_dp.md @@ -432,11 +432,11 @@ using BenchmarkTools, Plots, QuantEcon, Parameters ``` ```{code-cell} julia -SimpleOG(B = 10, M = 5, α = 0.5, β = 0.9)= (;B, M , α , β ) +SimpleOG(B = 10, M = 5, alpha = 0.5, beta = 0.9) = (; B, M, alpha, beta) function transition_matrices(g) - (;B, M, α, β) = g - u(c) = c^α + (; B, M, alpha, beta) = g + u(c) = c^alpha n = B + M + 1 m = M + 1 @@ -446,7 +446,7 @@ function transition_matrices(g) for a in 0:M Q[:, a + 1, (a:(a + B)) .+ 1] .= 1 / (B + 1) for s in 0:(B + M) - R[s + 1, a + 1] = (a≤s ? u(s - a) : -Inf) + R[s + 1, a + 1] = (a <= s ? u(s - a) : -Inf) end end @@ -468,8 +468,8 @@ In case the preceding code was too concise, we can see a more verbose form tags: [output_scroll] --- function verbose_matrices(g) - (;B, M, α, β) = g - u(c) = c^α + (;B, M, alpha, beta) = g + u(c) = c^alpha #Matrix dimensions. The +1 is due to the 0 state. n = B + M + 1 @@ -504,12 +504,12 @@ function verbose_matrices(g) end ``` -Instances of `DiscreteDP` are created using the signature `DiscreteDP(R, Q, β)`. +Instances of `DiscreteDP` are created using the signature `DiscreteDP(R, Q, beta)`. Let's create an instance using the objects stored in `g` ```{code-cell} julia -ddp = DiscreteDP(R, Q, g.β); +ddp = DiscreteDP(R, Q, g.beta); ``` Now that we have an instance `ddp` of `DiscreteDP` we can solve it as follows @@ -524,7 +524,7 @@ Let's see what we've got here fieldnames(typeof(results)) ``` -The most important attributes are `v`, the value function, and `σ`, the optimal policy +The most important attributes are `v`, the value function, and `sigma`, the optimal policy ```{code-cell} julia results.v @@ -569,7 +569,7 @@ results.num_iter tags: [remove-cell] --- @testset "Iteration Tests" begin - @test results.num_iter ≤ 3 # Make sure we didn't take more cycles, compared to v0.6 + @test results.num_iter <= 3 # Make sure we didn't take more cycles, compared to v0.6 end ``` @@ -606,10 +606,10 @@ Here's the same information in a bar graph What happens if the agent is more patient? 
```{code-cell} julia -g_2 = SimpleOG(β=0.99); +g_2 = SimpleOG(beta = 0.99); Q_2, R_2 = transition_matrices(g_2); -ddp_2 = DiscreteDP(R_2, Q_2, g_2.β) +ddp_2 = DiscreteDP(R_2, Q_2, g_2.beta) results_2 = solve(ddp_2, PFI) @@ -639,7 +639,7 @@ One of the advantages of this alternative set up is that it permits use of a spa (An example of using sparse matrices is given in the exercises below) -The call signature of the second formulation is `DiscreteDP(R, Q, β, s_indices, a_indices)` where +The call signature of the second formulation is `DiscreteDP(R, Q, beta, s_indices, a_indices)` where * `s_indices` and `a_indices` are arrays of equal length `L` enumerating all feasible state-action pairs * `R` is an array of length `L` giving corresponding rewards @@ -650,9 +650,9 @@ Here's how we could set up these objects for the preceding example ```{code-cell} julia B = 10 M = 5 -α = 0.5 -β = 0.9 -u(c) = c^α +alpha = 0.5 +beta = 0.9 +u(c) = c^alpha n = B + M + 1 m = M + 1 @@ -670,11 +670,11 @@ for s in 0:(M + B) q = zeros(1, n) q[(a + 1):((a + B) + 1)] .= b Q = [Q; q] - R = [R; u(s-a)] + R = [R; u(s - a)] end end -ddp = DiscreteDP(R, Q, β, s_indices, a_indices); +ddp = DiscreteDP(R, Q, beta, s_indices, a_indices); results = solve(ddp, PFI) ``` @@ -707,10 +707,10 @@ we let $f(k) = k^{\alpha}$ with $\alpha = 0.65$, $u(c) = \log c$, and $\beta = 0.95$. ```{code-cell} julia -α = 0.65 -f(k) = k.^α +alpha = 0.65 +f(k) = k .^ alpha u_log(x) = log(x) -β = 0.95 +beta = 0.95 ``` Here we want to solve a finite state version of the continuous state @@ -766,7 +766,7 @@ end Now let's set up $R$ and $Q$ ```{code-cell} julia -R = u_log.(C[C.>0]); +R = u_log.(C[C .> 0]); ``` ```{code-cell} julia @@ -793,14 +793,14 @@ We're now in a position to create an instance of `DiscreteDP` corresponding to the growth model. ```{code-cell} julia -ddp = DiscreteDP(R, Q, β, s_indices, a_indices); +ddp = DiscreteDP(R, Q, beta, s_indices, a_indices); ``` ### Solving the Model ```{code-cell} julia results = solve(ddp, PFI) -v, σ, num_iter = results.v, results.sigma, results.num_iter +v, sigma, num_iter = results.v, results.sigma, results.num_iter num_iter ``` @@ -810,8 +810,8 @@ tags: [remove-cell] --- @testset "Results Test" begin #test v[4] ≈ -42.301381867365954 - @test σ[4] == 10 - @test num_iter ≤ 10 + @test sigma[4] == 10 + @test num_iter <= 10 end ``` @@ -819,14 +819,14 @@ Let us compare the solution of the discrete model with the exact solution of the original continuous model. Here's the exact solution: ```{code-cell} julia -c = f(grid) - grid[σ] +c = f(grid) - grid[sigma] -ab = α * β -c1 = (log(1 - α * β) + log(α * β) * α * β / (1 - α * β)) / (1 - β) -c2 = α / (1 - α * β) +ab = alpha * beta +c1 = (log(1 - alpha * beta) + log(alpha * beta) * alpha * beta / (1 - alpha * beta)) / (1 - beta) +c2 = alpha / (1 - alpha * beta) v_star(k) = c1 + c2 * log(k) -c_star(k) = (1 - α * β) * k.^α +c_star(k) = (1 - alpha * beta) * k .^ alpha ``` ```{code-cell} julia @@ -843,7 +843,8 @@ end Let's plot the value functions. ```{code-cell} julia -plot(grid, [v v_star.(grid)], ylim = (-40, -32), lw = 2, label = ["discrete" "continuous"]) +plot(grid, [v v_star.(grid)], ylim = (-40, -32), lw = 2, + label = ["discrete" "continuous"]) ``` They are barely distinguishable (although you can see the difference if @@ -853,7 +854,8 @@ Now let's look at the discrete and exact policy functions for consumption. 
```{code-cell} julia -plot(grid, [c c_star.(grid)], lw = 2, label = ["discrete" "continuous"], legend = :topleft) +plot(grid, [c c_star.(grid)], lw = 2, label = ["discrete" "continuous"], + legend = :topleft) ``` These functions are again close, although some difference is visible and @@ -891,7 +893,7 @@ end The value function is monotone, as expected: ```{code-cell} julia -all(x -> x ≥ 0, diff(v)) +all(x -> x >= 0, diff(v)) ``` ```{code-cell} julia @@ -899,7 +901,7 @@ all(x -> x ≥ 0, diff(v)) tags: [remove-cell] --- @testset "Monotonicity Test" begin - @test all(x -> x ≥ 0, diff(v)) + @test all(x -> x >= 0, diff(v)) end ``` @@ -924,7 +926,7 @@ res1.num_iter ``` ```{code-cell} julia -σ == res1.sigma +sigma == res1.sigma ``` ```{code-cell} julia @@ -932,7 +934,7 @@ res1.num_iter tags: [remove-cell] --- @testset "Equivalence Test" begin - #test σ == res1.sigma + #test sigma == res1.sigma end ``` @@ -946,7 +948,7 @@ res2.num_iter ``` ```{code-cell} julia -σ == res2.sigma +sigma == res2.sigma ``` ```{code-cell} julia @@ -954,7 +956,7 @@ res2.num_iter tags: [remove-cell] --- @testset "Other Equivalence Test" begin - #test σ == res2.sigma + #test sigma == res2.sigma end ``` @@ -970,10 +972,10 @@ n = 50 ws = [] colors = [] w = w_init -for i in 0:n-1 +for i in 0:(n - 1) w = bellman_operator(ddp, w) push!(ws, w) - push!(colors, RGBA(0, 0, 0, i/n)) + push!(colors, RGBA(0, 0, 0, i / n)) end plot(grid, @@ -983,7 +985,7 @@ plot(grid, xlims = extrema(grid), label = "initial condition") -plot!(grid, ws, label = "", color = reshape(colors, 1, length(colors)), lw = 2) +plot!(grid, ws, label = "", color = reshape(colors, 1, length(colors)), lw = 2) plot!(grid, v_star.(grid), label = "true value function", color = :red, lw = 2) ``` @@ -1008,8 +1010,8 @@ function compute_policies(n_vals...) for n in 1:maximum(n_vals) w = bellman_operator(ddp, w) if n in n_vals - σ = compute_greedy(ddp, w) - c_policy = f(grid) - grid[σ] + sigma = compute_greedy(ddp, w) + c_policy = f(grid) - grid[sigma] push!(c_policies, c_policy) end end @@ -1061,21 +1063,21 @@ condition $k_0 = 0.1$. 
discount_factors = (0.9, 0.94, 0.98) k_init = 0.1 -k_init_ind = findfirst(collect(grid) .≥ k_init) +k_init_ind = findfirst(collect(grid) .>= k_init) sample_size = 25 -ddp0 = DiscreteDP(R, Q, β, s_indices, a_indices) +ddp0 = DiscreteDP(R, Q, beta, s_indices, a_indices) k_paths = [] labels = [] -for β in discount_factors - ddp0.beta = β +for beta in discount_factors + ddp0.beta = beta res0 = solve(ddp0, PFI) - k_path_ind = simulate(res0.mc, sample_size, init=k_init_ind) - k_path = grid[k_path_ind.+1] + k_path_ind = simulate(res0.mc, sample_size, init = k_init_ind) + k_path = grid[k_path_ind .+ 1] push!(k_paths, k_path) - push!(labels, L"\beta = %$β") + push!(labels, L"\beta = %$beta") end plot(k_paths, diff --git a/lectures/dynamic_programming/ifp.md b/lectures/dynamic_programming/ifp.md index cbbb115d..c574324f 100644 --- a/lectures/dynamic_programming/ifp.md +++ b/lectures/dynamic_programming/ifp.md @@ -387,8 +387,8 @@ u(x) = log(x) du(x) = 1 / x # model -function ConsumerProblem(;r = 0.01, - β = 0.96, +function ConsumerProblem(; r = 0.01, + beta = 0.96, Π = [0.6 0.4; 0.05 0.95], z_vals = [0.5, 1.0], b = 0.0, @@ -397,13 +397,14 @@ function ConsumerProblem(;r = 0.01, R = 1 + r asset_grid = range(-b, grid_max, length = grid_size) - return (r = r, R = R, β = β, b = b, Π = Π, z_vals = z_vals, asset_grid = asset_grid) + return (r = r, R = R, beta = beta, b = b, Π = Π, z_vals = z_vals, + asset_grid = asset_grid) end function T!(cp, V, out; ret_policy = false) # unpack input, set up arrays - (;R, Π, β, b, asset_grid, z_vals) = cp + (; R, Π, beta, b, asset_grid, z_vals) = cp z_idx = 1:length(z_vals) # value function when the shock index is z_i @@ -414,10 +415,9 @@ function T!(cp, V, out; ret_policy = false) # solve for RHS of Bellman equation for (i_z, z) in enumerate(z_vals) for (i_a, a) in enumerate(asset_grid) - function obj(c) EV = dot(vf.(R * a + z - c, z_idx), Π[i_z, :]) # compute expectation - return u(c) + β * EV + return u(c) + beta * EV end res = maximize(obj, opt_lb, R .* a .+ z .+ b) converged(res) || error("Didn't converge") # important to check @@ -427,26 +427,22 @@ function T!(cp, V, out; ret_policy = false) else out[i_a, i_z] = maximum(res) end - end end out end -T(cp, V; ret_policy = false) = - T!(cp, V, similar(V); ret_policy = ret_policy) +T(cp, V; ret_policy = false) = T!(cp, V, similar(V); ret_policy = ret_policy) -get_greedy!(cp, V, out) = - update_bellman!(cp, V, out, ret_policy = true) +get_greedy!(cp, V, out) = update_bellman!(cp, V, out, ret_policy = true) -get_greedy(cp, V) = - update_bellman(cp, V, ret_policy = true) +get_greedy(cp, V) = update_bellman(cp, V, ret_policy = true) function K!(cp, c, out) # simplify names, set up arrays - (;R, Π, β, b, asset_grid, z_vals) = cp + (; R, Π, beta, b, asset_grid, z_vals) = cp z_idx = 1:length(z_vals) - gam = R * β + gam = R * beta # policy function when the shock index is z_i cf = interp(asset_grid, c) @@ -461,7 +457,7 @@ function K!(cp, c, out) expectation = dot(du.(cps), Π[i_z, :]) return abs(du(t) - max(gam * expectation, du(R * a + z + b))) end - opt_ub = R*a + z + b # addresses issue #8 on github + opt_ub = R * a + z + b # addresses issue #8 on github res = optimize(h, min(opt_lb, opt_ub - 1e-2), opt_ub, method = Optim.Brent()) out[i_a, i_z] = minimizer(res) @@ -474,7 +470,7 @@ K(cp, c) = K!(cp, c, similar(c)) function initialize(cp) # simplify names, set up arrays - (;R, β, b, asset_grid, z_vals) = cp + (; R, beta, b, asset_grid, z_vals) = cp shape = length(asset_grid), length(z_vals) V, c = zeros(shape...), 
zeros(shape...) @@ -483,7 +479,7 @@ function initialize(cp) for (i_a, a) in enumerate(asset_grid) c_max = R * a + z + b c[i_a, i_z] = c_max - V[i_a, i_z] = u(c_max) / (1 - β) + V[i_a, i_z] = u(c_max) / (1 - beta) end end @@ -682,7 +678,7 @@ println("Starting value function iteration") for i in 1:N V = T(cp, V) end -c1 = T(cp, V, ret_policy=true) +c1 = T(cp, V, ret_policy = true) V2, c2 = initialize(cp) println("Starting policy function iteration") @@ -743,7 +739,7 @@ end ```{code-cell} julia function compute_asset_series(cp, T = 500_000; verbose = false) - (;Π, z_vals, R) = cp # simplify names + (; Π, z_vals, R) = cp # simplify names z_idx = 1:length(z_vals) v_init, c_init = initialize(cp) c = compute_fixed_point(x -> K(cp, x), c_init, @@ -755,7 +751,7 @@ function compute_asset_series(cp, T = 500_000; verbose = false) z_seq = simulate(MarkovChain(Π), T) for t in 1:T i_z = z_seq[t] - a[t+1] = R * a[t] + z_vals[i_z] - cf(a[t], i_z) + a[t + 1] = R * a[t] + z_vals[i_z] - cf(a[t], i_z) end return a end diff --git a/lectures/dynamic_programming/jv.md b/lectures/dynamic_programming/jv.md index e8f9987a..3b0c2671 100644 --- a/lectures/dynamic_programming/jv.md +++ b/lectures/dynamic_programming/jv.md @@ -184,66 +184,64 @@ using Test ``` ```{code-cell} julia - # model object - function JvWorker(;A = 1.4, - α = 0.6, - β = 0.96, - grid_size = 50, - ϵ = 1e-4) - - G(x, ϕ) = A .* (x .* ϕ).^α - π_func = sqrt - F = Beta(2, 2) - - # expectation operator - E = expectation(F) - - # Set up grid over the state space for DP - # Max of grid is the max of a large quantile value for F and the - # fixed point y = G(y, 1). - grid_max = max(A^(1.0 / (1.0 - α)), quantile(F, 1 - ϵ)) - - # range for range(ϵ, grid_max, grid_size). Needed for - # CoordInterpGrid below - x_grid = range(ϵ, grid_max, length = grid_size) - - return (A = A, α = α, β = β, x_grid = x_grid, G = G, - π_func = π_func, F = F, E = E, ϵ = ϵ) - end +# model object +function JvWorker(; A = 1.4, + alpha = 0.6, + beta = 0.96, + grid_size = 50, + epsilon = 1e-4) + G(x, phi) = A .* (x .* phi) .^ alpha + π_func = sqrt + F = Beta(2, 2) + + # expectation operator + E = expectation(F) + + # Set up grid over the state space for DP + # Max of grid is the max of a large quantile value for F and the + # fixed point y = G(y, 1). + grid_max = max(A^(1.0 / (1.0 - alpha)), quantile(F, 1 - epsilon)) + + # range for range(epsilon, grid_max, grid_size). 
Needed for + # CoordInterpGrid below + x_grid = range(epsilon, grid_max, length = grid_size) + + return (A = A, alpha = alpha, beta = beta, x_grid = x_grid, G = G, + π_func = π_func, F = F, E = E, epsilon = epsilon) +end function T!(jv, - V, - new_V::AbstractVector) + V, + new_V::AbstractVector) # simplify notation - (;G, π_func, F, β, E, ϵ) = jv + (; G, π_func, F, beta, E, epsilon) = jv # prepare interpoland of value function - Vf = LinearInterpolation(jv.x_grid, V, extrapolation_bc=Line()) + Vf = LinearInterpolation(jv.x_grid, V, extrapolation_bc = Line()) # instantiate the linesearch variables max_val = -1.0 cur_val = 0.0 max_s = 1.0 - max_ϕ = 1.0 - search_grid = range(ϵ, 1.0, length = 15) + max_phi = 1.0 + search_grid = range(epsilon, 1.0, length = 15) for (i, x) in enumerate(jv.x_grid) - function w(z) - s, ϕ = z - h(u) = Vf(max(G(x, ϕ), u)) + s, phi = z + h(u) = Vf(max(G(x, phi), u)) integral = E(h) - q = π_func(s) * integral + (1.0 - π_func(s)) * Vf(G(x, ϕ)) + q = π_func(s) * integral + (1.0 - π_func(s)) * Vf(G(x, phi)) - return - x * (1.0 - ϕ - s) - β * q + return -x * (1.0 - phi - s) - beta * q end for s in search_grid - for ϕ in search_grid - cur_val = ifelse(s + ϕ <= 1.0, -w((s, ϕ)), -1.0) + for phi in search_grid + cur_val = ifelse(s + phi <= 1.0, -w((s, phi)), -1.0) if cur_val > max_val - max_val, max_s, max_ϕ = cur_val, s, ϕ + max_val, max_s, max_phi = cur_val, s, phi end end end @@ -253,47 +251,46 @@ function T!(jv, end function T!(jv, - V, - out::Tuple{AbstractVector, AbstractVector}) + V, + out::Tuple{AbstractVector, AbstractVector}) # simplify notation - (;G, π_func, F, β, E, ϵ) = jv + (; G, π_func, F, beta, E, epsilon) = jv # prepare interpoland of value function - Vf = LinearInterpolation(jv.x_grid, V, extrapolation_bc=Line()) + Vf = LinearInterpolation(jv.x_grid, V, extrapolation_bc = Line()) # instantiate variables - s_policy, ϕ_policy = out[1], out[2] + s_policy, phi_policy = out[1], out[2] # instantiate the linesearch variables max_val = -1.0 cur_val = 0.0 max_s = 1.0 - max_ϕ = 1.0 - search_grid = range(ϵ, 1.0, length = 15) + max_phi = 1.0 + search_grid = range(epsilon, 1.0, length = 15) for (i, x) in enumerate(jv.x_grid) - function w(z) - s, ϕ = z - h(u) = Vf(max(G(x, ϕ), u)) + s, phi = z + h(u) = Vf(max(G(x, phi), u)) integral = E(h) - q = π_func(s) * integral + (1.0 - π_func(s)) * Vf(G(x, ϕ)) + q = π_func(s) * integral + (1.0 - π_func(s)) * Vf(G(x, phi)) - return - x * (1.0 - ϕ - s) - β * q + return -x * (1.0 - phi - s) - beta * q end for s in search_grid - for ϕ in search_grid - cur_val = ifelse(s + ϕ <= 1.0, -w((s, ϕ)), -1.0) + for phi in search_grid + cur_val = ifelse(s + phi <= 1.0, -w((s, phi)), -1.0) if cur_val > max_val - max_val, max_s, max_ϕ = cur_val, s, ϕ + max_val, max_s, max_phi = cur_val, s, phi end end end - s_policy[i], ϕ_policy[i] = max_s, max_ϕ -end + s_policy[i], phi_policy[i] = max_s, max_phi + end end function T(jv, V; ret_policies = false) @@ -372,17 +369,17 @@ Let's plot the optimal policies and see what they look like. 
The code is as follows ```{code-cell} julia -wp = JvWorker(grid_size=25) +wp = JvWorker(grid_size = 25) v_init = collect(wp.x_grid) .* 0.5 f(x) = T(wp, x) V = fixedpoint(f, v_init) sol_V = V.zero -s_policy, ϕ_policy = T(wp, sol_V, ret_policies = true) +s_policy, phi_policy = T(wp, sol_V, ret_policies = true) # plot solution -p = plot(wp.x_grid, [ϕ_policy s_policy sol_V], +p = plot(wp.x_grid, [phi_policy s_policy sol_V], title = [L"$\phi$ policy" L"$s$ policy" "value function"], color = [:orange :blue :green], xaxis = (L"x", (0.0, maximum(wp.x_grid))), @@ -404,7 +401,7 @@ Overall, the policies match well with our predictions from {ref}`section v2 : max(v1, v2) @@ -277,37 +276,38 @@ function T!(sp, v, out; end function T(sp, v; - ret_policy = false) + ret_policy = false) out_type = ret_policy ? Bool : Float64 out = zeros(out_type, sp.n_w, sp.n_π) - T!(sp, v, out, ret_policy=ret_policy) + T!(sp, v, out, ret_policy = ret_policy) end - get_greedy!(sp, v, out) = T!(sp, v, out, ret_policy = true) get_greedy(sp, v) = T(sp, v, ret_policy = true) -function res_wage_operator!(sp, ϕ, out) +function res_wage_operator!(sp, phi, out) # simplify name - (;f, g, β, c) = sp + (; f, g, beta, c) = sp - # Construct interpolator over π_grid, given ϕ - ϕ_f = LinearInterpolation(sp.π_grid, ϕ, extrapolation_bc = Line()) + # Construct interpolator over π_grid, given phi + phi_f = LinearInterpolation(sp.π_grid, phi, extrapolation_bc = Line()) # set up quadrature nodes/weights q_nodes, q_weights = qnwlege(7, 0.0, sp.w_max) for (i, _π) in enumerate(sp.π_grid) - integrand(x) = max.(x, ϕ_f.(q.(Ref(sp), x, _π))) .* (_π * f(x) + (1 - _π) * g(x)) + function integrand(x) + max.(x, phi_f.(q.(Ref(sp), x, _π))) .* (_π * f(x) + (1 - _π) * g(x)) + end integral = do_quad(integrand, q_nodes, q_weights) - out[i] = (1 - β) * c + β * integral + out[i] = (1 - beta) * c + beta * integral end end -function res_wage_operator(sp, ϕ) - out = similar(ϕ) - res_wage_operator!(sp, ϕ, out) +function res_wage_operator(sp, phi) + out = similar(phi) + res_wage_operator!(sp, phi, out) return out end ``` @@ -327,28 +327,28 @@ Here's the value function: ```{code-cell} julia # Set up the problem and initial guess, solve by VFI -sp = SearchProblem(;w_grid_size=100, π_grid_size=100) -v_init = fill(sp.c / (1 - sp.β), sp.n_w, sp.n_π) +sp = SearchProblem(; w_grid_size = 100, π_grid_size = 100) +v_init = fill(sp.c / (1 - sp.beta), sp.n_w, sp.n_π) f(x) = T(sp, x) v = compute_fixed_point(f, v_init) policy = get_greedy(sp, v) # Make functions for the linear interpolants of these vf = extrapolate(interpolate((sp.w_grid, sp.π_grid), v, Gridded(Linear())), - Flat()) + Flat()) pf = extrapolate(interpolate((sp.w_grid, sp.π_grid), policy, - Gridded(Linear())), Flat()) - -function plot_value_function(;w_plot_grid_size = 100, - π_plot_grid_size = 100) - π_plot_grid = range(0.001, 0.99, length = π_plot_grid_size) - w_plot_grid = range(0, sp.w_max, length = w_plot_grid_size) - Z = [vf(w_plot_grid[j], π_plot_grid[i]) - for j in 1:w_plot_grid_size, i in 1:π_plot_grid_size] - p = contour(π_plot_grid, w_plot_grid, Z, levels=15, alpha=0.6, - fill=true, size=(400, 400), c=:lightrainbow) - plot!(xlabel=L"\pi", ylabel=L"w", xguidefont=font(12)) - return p + Gridded(Linear())), Flat()) + +function plot_value_function(; w_plot_grid_size = 100, + π_plot_grid_size = 100) + π_plot_grid = range(0.001, 0.99, length = π_plot_grid_size) + w_plot_grid = range(0, sp.w_max, length = w_plot_grid_size) + Z = [vf(w_plot_grid[j], π_plot_grid[i]) + for j in 1:w_plot_grid_size, i in 
1:π_plot_grid_size] + p = contour(π_plot_grid, w_plot_grid, Z, levels = 15, alpha = 0.6, + fill = true, size = (400, 400), c = :lightrainbow) + plot!(xlabel = L"\pi", ylabel = L"w", xguidefont = font(12)) + return p end plot_value_function() @@ -358,15 +358,15 @@ plot_value_function() The optimal policy: ```{code-cell} julia -function plot_policy_function(;w_plot_grid_size = 100, +function plot_policy_function(; w_plot_grid_size = 100, π_plot_grid_size = 100) - π_plot_grid = range(0.001, 0.99, length = π_plot_grid_size) - w_plot_grid = range(0, sp.w_max, length = w_plot_grid_size) + π_plot_grid = range(0.001, 0.99, length = π_plot_grid_size) + w_plot_grid = range(0, sp.w_max, length = w_plot_grid_size) Z = [pf(w_plot_grid[j], π_plot_grid[i]) - for j in 1:w_plot_grid_size, i in 1:π_plot_grid_size] - p = contour(π_plot_grid, w_plot_grid, Z, levels=1, alpha=0.6, fill=true, - size=(400, 400), c=:coolwarm) - plot!(xlabel=L"\pi", ylabel="wage", xguidefont=font(12), cbar=false) + for j in 1:w_plot_grid_size, i in 1:π_plot_grid_size] + p = contour(π_plot_grid, w_plot_grid, Z, levels = 1, alpha = 0.6, fill = true, + size = (400, 400), c = :coolwarm) + plot!(xlabel = L"\pi", ylabel = "wage", xguidefont = font(12), cbar = false) annotate!(0.4, 1.0, "reject") annotate!(0.7, 1.8, "accept") return p @@ -557,16 +557,16 @@ time is much shorter than that of the value function approach in ```{code-cell} julia sp = SearchProblem(π_grid_size = 50) -ϕ_init = ones(sp.n_π) +phi_init = ones(sp.n_π) f_ex1(x) = res_wage_operator(sp, x) -w̄ = compute_fixed_point(f_ex1, ϕ_init) +w̄ = compute_fixed_point(f_ex1, phi_init) -plot(sp.π_grid, w̄, linewidth = 2, color=:black, +plot(sp.π_grid, w̄, linewidth = 2, color = :black, fillrange = 0, fillalpha = 0.15, fillcolor = :blue) plot!(sp.π_grid, 2 * ones(length(w̄)), linewidth = 0, fillrange = w̄, fillalpha = 0.12, fillcolor = :green, legend = :none) plot!(ylims = (0, 2), annotations = [(0.42, 1.2, "reject"), - (0.7, 1.8, "accept")]) + (0.7, 1.8, "accept")]) ``` The next piece of code is not one of the exercises from QuantEcon -- it's @@ -587,11 +587,11 @@ Random.seed!(42) # Set up model and compute the function w̄ sp = SearchProblem(π_grid_size = 50, F_a = 1, F_b = 1) -ϕ_init = ones(sp.n_π) +phi_init = ones(sp.n_π) g(x) = res_wage_operator(sp, x) -w̄_vals = compute_fixed_point(g, ϕ_init) -w̄ = extrapolate(interpolate((sp.π_grid, ), w̄_vals, - Gridded(Linear())), Flat()) +w̄_vals = compute_fixed_point(g, phi_init) +w̄ = extrapolate(interpolate((sp.π_grid,), w̄_vals, + Gridded(Linear())), Flat()) # Holds the employment state and beliefs of an individual agent. 
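# `_π` is the worker's current belief, i.e. the probability the worker attaches to
# wage offers being drawn from distribution F; `employed` equals 1 while employed
# and 0 while searching.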
mutable struct Agent{TF <: AbstractFloat, TI <: Integer} @@ -599,12 +599,12 @@ mutable struct Agent{TF <: AbstractFloat, TI <: Integer} employed::TI end -Agent(_π=1e-3) = Agent(_π, 1) +Agent(_π = 1e-3) = Agent(_π, 1) function update!(ag, H) if ag.employed == 0 w = rand(H) * 2 # account for scale in julia - if w ≥ w̄(ag._π) + if w >= w̄(ag._π) ag.employed = 1 else ag._π = 1.0 ./ (1 .+ ((1 - ag._π) .* sp.g(w)) ./ (ag._π * sp.f(w))) @@ -617,7 +617,7 @@ num_agents = 5000 separation_rate = 0.025 # Fraction of jobs that end in each period separation_num = round(Int, num_agents * separation_rate) agent_indices = collect(1:num_agents) -agents = [Agent() for i=1:num_agents] +agents = [Agent() for i in 1:num_agents] sim_length = 600 H = sp.G # Start with distribution G change_date = 200 # Change to F after this many periods diff --git a/lectures/dynamic_programming/optgrowth.md b/lectures/dynamic_programming/optgrowth.md index 9ae8e6cc..c958d731 100644 --- a/lectures/dynamic_programming/optgrowth.md +++ b/lectures/dynamic_programming/optgrowth.md @@ -477,16 +477,17 @@ using Test ```{code-cell} julia f(x) = 2 .* cos.(6x) .+ sin.(14x) .+ 2.5 -c_grid = 0:.2:1 -f_grid = range(0, 1, length = 150) +c_grid = 0:0.2:1 +f_grid = range(0, 1, length = 150) Af = LinearInterpolation(c_grid, f(c_grid)) -plt = plot(xlim = (0,1), ylim = (0,6)) +plt = plot(xlim = (0, 1), ylim = (0, 6)) plot!(plt, f, f_grid, color = :blue, lw = 2, alpha = 0.8, label = "true function") plot!(plt, f_grid, Af.(f_grid), color = :green, lw = 2, alpha = 0.8, label = "linear approximation") -plot!(plt, f, c_grid, seriestype = :sticks, linestyle = :dash, linewidth = 2, alpha = 0.5, +plot!(plt, f, c_grid, seriestype = :sticks, linestyle = :dash, linewidth = 2, + alpha = 0.5, label = "") plot!(plt, legend = :top) ``` @@ -498,19 +499,20 @@ Another advantage of piecewise linear interpolation is that it preserves useful Here's a function that implements the Bellman operator using linear interpolation ```{code-cell} julia -function T(w;p, tol = 1e-10) - (;β, u, f, ξ, y) = p # unpack parameters +function T(w; p, tol = 1e-10) + (; beta, u, f, ξ, y) = p # unpack parameters w_func = LinearInterpolation(y, w) Tw = similar(w) - σ = similar(w) + sigma = similar(w) for (i, y_val) in enumerate(y) # solve maximization for each point in y, using y itself as initial condition. 
- results = maximize(c -> u(c;p) + β * mean(w_func.(f(y_val - c;p) .* ξ)), tol, y_val) + results = maximize(c -> u(c; p) + beta * mean(w_func.(f(y_val - c; p) .* ξ)), + tol, y_val) Tw[i] = maximum(results) - σ[i] = maximizer(results) + sigma[i] = maximizer(results) end - return (;w = Tw, σ) # returns named tuple of results + return (; w = Tw, sigma) # returns named tuple of results end ``` @@ -563,24 +565,26 @@ In addition to the model parameters, we need a grid and some shock draws for Mon ```{code-cell} julia Random.seed!(42) # for reproducible results -u(c;p) = log(c) # utility -f(k;p) = k^p.α # deterministic part of production function -OptimalGrowthModel(α = 0.4, β = 0.96, μ = 0.0, s = 0.1, - u = u, f = f, # defaults defined above - y = range(1e-5, 4.0, length = 200), # grid on y - ξ = exp.(μ .+ s * randn(250)) # monte carlo shocks -) = (; α, β, μ, s , u , f , y , ξ ) # named tuples defaults +u(c; p) = log(c) # utility +f(k; p) = k^p.alpha # deterministic part of production function +function # named tuples defaults +OptimalGrowthModel(alpha = 0.4, beta = 0.96, mu = 0.0, s = 0.1, + u = u, f = f, # defaults defined above + y = range(1e-5, 4.0, length = 200), # grid on y + ξ = exp.(mu .+ s * randn(250))) + (; alpha, beta, mu, s, u, f, y, ξ) +end # named tuples defaults # True value and policy function -function v_star(y;p) - (;α, μ, β) = p - c1 = log(1 - α * β) / (1 - β) - c2 = (μ + α * log(α * β)) / (1 - α) - c3 = 1 / (1 - β) - c4 = 1 / (1 - α * β) +function v_star(y; p) + (; alpha, mu, beta) = p + c1 = log(1 - alpha * beta) / (1 - beta) + c2 = (mu + alpha * log(alpha * beta)) / (1 - alpha) + c3 = 1 / (1 - beta) + c4 = 1 / (1 - alpha * beta) return c1 + c2 * (c3 - c4) + c4 * log(y) end -c_star(y;p) = (1 - p.α * p.β) * y +c_star(y; p) = (1 - p.alpha * p.beta) * y ``` ### A First Test @@ -601,9 +605,9 @@ w_star = v_star.(p.y; p) # evaluate closed form value along grid w = T(w_star; p).w # evaluate operator, access Tw results -plt = plot(ylim = (-35,-24)) +plt = plot(ylim = (-35, -24)) plot!(plt, p.y, w, linewidth = 2, alpha = 0.6, label = L"T(v^*)") -plot!(plt, p.y, w_star, linewidth = 2, alpha=0.6, label = L"v^*") +plot!(plt, p.y, w_star, linewidth = 2, alpha = 0.6, label = L"v^*") plot!(plt, legend = :bottomright) ``` @@ -623,12 +627,14 @@ lb = "initial condition" plt = plot(p.y, w, color = :black, linewidth = 2, alpha = 0.8, label = lb) for i in 1:n w = T(w; p).w - plot!(p.y, w, color = RGBA(i/n, 0, 1 - i/n, 0.8), linewidth = 2, alpha = 0.6, + plot!(p.y, w, color = RGBA(i / n, 0, 1 - i / n, 0.8), linewidth = 2, + alpha = 0.6, label = "") end lb = "true value function" -plot!(plt, p.y, v_star.(p.y; p), color = :black, linewidth = 2, alpha = 0.8, label = lb) +plot!(plt, p.y, v_star.(p.y; p), color = :black, linewidth = 2, alpha = 0.8, + label = lb) plot!(plt, legend = :bottomright) ``` @@ -653,11 +659,11 @@ We are clearly getting closer. 
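One way to quantify "getting closer" is to track the sup-norm distance between the iterates and the exact solution. Here is a small sketch along those lines, assuming `p`, `T`, and `v_star` from above are in scope; the initial guess and the number of iterations are arbitrary.

```julia
w = 5 * log.(p.y)                      # an arbitrary initial guess
for i in 1:50
    w = T(w; p).w                      # apply the Bellman operator
end
maximum(abs, w - v_star.(p.y; p))      # sup-norm distance to the exact value function
```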
We can write a function that computes the exact fixed point ```{code-cell} julia -function solve_optgrowth(initial_w; p, iterations = 500, m = 3, show_trace = false) - results = fixedpoint(w -> T(w;p).w, initial_w; iterations, m, show_trace) # Anderson iteration +function solve_optgrowth(initial_w; p, iterations = 500, m = 3, show_trace = false) + results = fixedpoint(w -> T(w; p).w, initial_w; iterations, m, show_trace) # Anderson iteration v_star = results.zero - σ = T(results.zero;p).σ - return (;v_star, σ, results) + sigma = T(results.zero; p).sigma + return (; v_star, sigma, results) end ``` @@ -665,14 +671,15 @@ We can check our result by plotting it against the true value ```{code-cell} julia initial_w = 5 * log.(p.y) -sol = solve_optgrowth(initial_w;p) +sol = solve_optgrowth(initial_w; p) v_star_approx = sol.v_star println("Converged in $(sol.results.iterations) to an ||residuals||_∞ of $(sol.results.residual_norm)") plt = plot(ylim = (-35, -24)) plot!(plt, p.y, v_star_approx, linewidth = 2, alpha = 0.6, label = "approximate value function") -plot!(plt, p.y, v_star.(p.y;p), linewidth = 2, alpha = 0.6, label = "true value function") +plot!(plt, p.y, v_star.(p.y; p), linewidth = 2, alpha = 0.6, + label = "true value function") plot!(plt, legend = :bottomright) ``` @@ -692,8 +699,10 @@ The next figure compares the result to the exact solution, which, as mentioned above, is $\sigma(y) = (1 - \alpha \beta) y$. ```{code-cell} julia -plt = plot(p.y, T(v_star_approx; p).σ, lw=2, alpha=0.6, label = "approximate policy function") -plot!(plt, p.y, c_star.(p.y; p), lw = 2, alpha = 0.6, label = "true policy function") +plt = plot(p.y, T(v_star_approx; p).sigma, lw = 2, alpha = 0.6, + label = "approximate policy function") +plot!(plt, p.y, c_star.(p.y; p), lw = 2, alpha = 0.6, + label = "true policy function") plot!(plt, legend = :bottomright) ``` @@ -755,28 +764,28 @@ Replicate the figure modulo randomness. 
Here's one solution (assuming as usual that you've executed everything above) ```{code-cell} julia -function simulate_og(σ, p, y0, ts_length) +function simulate_og(sigma, p, y0, ts_length) y = zeros(ts_length) y[1] = y0 - for t in 1:(ts_length-1) - y[t+1] = (y[t] - σ(y[t]))^p.α * exp(p.μ + p.s * randn()) + for t in 1:(ts_length - 1) + y[t + 1] = (y[t] - sigma(y[t]))^p.alpha * exp(p.mu + p.s * randn()) end return y end -β_vals = [0.9 0.94 0.98] +beta_vals = [0.9 0.94 0.98] ts_length = 100 y0 = 0.1 plt = plot() -for β in β_vals - p = OptimalGrowthModel(;β) # change β from default +for beta in beta_vals + p = OptimalGrowthModel(; beta) # change beta from default initial_w = 5 * log.(p.y) - sol = solve_optgrowth(initial_w;p) - σ_func = LinearInterpolation(p.y, sol.σ) - y = simulate_og(σ_func, p,y0, ts_length) + sol = solve_optgrowth(initial_w; p) + sigma_func = LinearInterpolation(p.y, sol.sigma) + y = simulate_og(sigma_func, p, y0, ts_length) - plot!(plt, 0:(ts_length-1), y, lw = 2, alpha = 0.6, label = L"\beta = %$β") + plot!(plt, 0:(ts_length - 1), y, lw = 2, alpha = 0.6, label = L"\beta = %$beta") end plt ``` \ No newline at end of file diff --git a/lectures/dynamic_programming/perm_income.md b/lectures/dynamic_programming/perm_income.md index 58a1a974..1fa228e1 100644 --- a/lectures/dynamic_programming/perm_income.md +++ b/lectures/dynamic_programming/perm_income.md @@ -478,29 +478,28 @@ using Test ```{code-cell} julia using Plots, Random - Random.seed!(42) const r = 0.05 -const β = 1.0 / (1.0 + r) +const beta = 1.0 / (1.0 + r) const T = 60 -const σ = 0.15 -const μ = 1.0 +const sigma = 0.15 +const mu = 1.0 function time_path2() - w = randn(T+1) - w[1] = 0.0 - b = zeros(T+1) - for t=2:T+1 + w = randn(T + 1) + w[1] = 0.0 + b = zeros(T + 1) + for t in 2:(T + 1) b[t] = sum(w[1:t]) end - b .*= -σ - c = μ .+ (1.0 - β) .* (σ .* w .- b) + b .*= -sigma + c = mu .+ (1.0 - beta) .* (sigma .* w .- b) return w, b, c end w, b, c = time_path2() -p = plot(0:T, μ .+ σ .* w, color = :green, label = "non-financial income") +p = plot(0:T, mu .+ sigma .* w, color = :green, label = "non-financial income") plot!(c, color = :black, label = "consumption") plot!(b, color = :blue, label = "debt") plot!(xlabel = "Time", linewidth = 2, alpha = 0.7, @@ -537,7 +536,7 @@ for i in 1:n push!(time_paths, time_path2()[3]) end -p = plot(time_paths, linewidth = 0.8, alpha=0.7, legend = :none) +p = plot(time_paths, linewidth = 0.8, alpha = 0.7, legend = :none) plot!(xlabel = "Time", ylabel = "Consumption", xlims = (0, T)) ``` @@ -830,28 +829,27 @@ permanent income shocks using impulse-response functions ```{code-cell} julia const r = 0.05 -const β = 1.0 / (1.0 + r) +const beta = 1.0 / (1.0 + r) const T2 = 20 # Time horizon const S = 5 # Impulse date -const σ1 = 0.15 -const σ2 = 0.15 - +const sigma1 = 0.15 +const sigma2 = 0.15 function time_path(permanent = false) - w1 = zeros(T2+1) + w1 = zeros(T2 + 1) w2 = similar(w1) b = similar(w1) c = similar(w1) if permanent === false - w2[S+2] = 1.0 + w2[S + 2] = 1.0 else - w1[S+2] = 1.0 + w1[S + 2] = 1.0 end - for t=2:T2 - b[t+1] = b[t] - σ2 * w2[t] - c[t+1] = c[t] + σ1 * w1[t+1] + (1 - β) * σ2 * w2[t+1] + for t in 2:T2 + b[t + 1] = b[t] - sigma2 * w2[t] + c[t + 1] = c[t] + sigma1 * w1[t + 1] + (1 - beta) * sigma2 * w2[t + 1] end return b, c @@ -862,8 +860,8 @@ L = 0.175 b1, c1 = time_path(false) b2, c2 = time_path(true) p = plot(0:T2, [c1 c2 b1 b2], layout = (2, 1), - color = [:green :green :blue :blue], - label = ["consumption" "consumption" "debt" "debt"]) + color = [:green :green 
:blue :blue], + label = ["consumption" "consumption" "debt" "debt"]) t = ["impulse-response, transitory income shock" "impulse-response, permanent income shock"] plot!(title = reshape(t, 1, length(t)), xlabel = "Time", ylims = (-L, L), diff --git a/lectures/dynamic_programming/perm_income_cons.md b/lectures/dynamic_programming/perm_income_cons.md index 5b58fab9..21a0dc7d 100644 --- a/lectures/dynamic_programming/perm_income_cons.md +++ b/lectures/dynamic_programming/perm_income_cons.md @@ -329,29 +329,28 @@ using Test using QuantEcon, LinearAlgebra using LaTeXStrings, Plots - # Set parameters -α, β, ρ1, ρ2, σ = 10.0, 0.95, 0.9, 0.0, 1.0 +alpha, beta, rho1, rho2, sigma = 10.0, 0.95, 0.9, 0.0, 1.0 -R = 1 / β +R = 1 / beta A = [1.0 0.0 0.0; - α ρ1 ρ2; - 0.0 1.0 0.0] -C = [0.0; σ; 0.0]'' + alpha rho1 rho2; + 0.0 1.0 0.0] +C = [0.0; sigma; 0.0]'' G = [0.0 1.0 0.0] # Form LinearStateSpace system and pull off steady state moments -μ_z0 = [1.0, 0.0, 0.0] -Σ_z0 = zeros(3, 3) -Lz = LSS(A, C, G, mu_0=μ_z0, Sigma_0=Σ_z0) -μ_z, μ_y, Σ_z, Σ_y = stationary_distributions(Lz) +mu_z0 = [1.0, 0.0, 0.0] +Sigma_z0 = zeros(3, 3) +Lz = LSS(A, C, G, mu_0 = mu_z0, Sigma_0 = Sigma_z0) +mu_z, mu_y, Sigma_z, Sigma_y = stationary_distributions(Lz) # Mean vector of state for the savings problem -mxo = [μ_z; 0.0] +mxo = [mu_z; 0.0] # Create stationary covariance matrix of x -- start everyone off at b=0 a1 = zeros(3, 1) -aa = hcat(Σ_z, a1) +aa = hcat(Sigma_z, a1) bb = zeros(1, 4) sxo = vcat(aa, bb) @@ -374,20 +373,20 @@ end The next step is to create the matrices for the LQ system ```{code-cell} julia -A12 = zeros(3,1) +A12 = zeros(3, 1) ALQ_l = hcat(A, A12) ALQ_r = [0 -R 0 R] ALQ = vcat(ALQ_l, ALQ_r) RLQ = [0.0 0.0 0.0 0.0; - 0.0 0.0 0.0 0.0; - 0.0 0.0 0.0 0.0; - 0.0 0.0 0.0 1e-9] + 0.0 0.0 0.0 0.0; + 0.0 0.0 0.0 0.0; + 0.0 0.0 0.0 1e-9] QLQ = 1.0 BLQ = [0.0; 0.0; 0.0; R] -CLQ = [0.0; σ; 0.0; 0.0] -β_LQ = β +CLQ = [0.0; sigma; 0.0; 0.0] +beta_LQ = beta ``` Let's print these out and have a look at them @@ -402,7 +401,7 @@ println("Q = $QLQ") Now create the appropriate instance of an LQ model ```{code-cell} julia -LQPI = QuantEcon.LQ(QLQ, RLQ, ALQ, BLQ, CLQ, bet=β_LQ); +LQPI = QuantEcon.LQ(QLQ, RLQ, ALQ, BLQ, CLQ, bet = beta_LQ); ``` We'll save the implied optimal policy function soon and compare with what we get by @@ -448,8 +447,8 @@ Now we'll apply the formulas in this system ```{code-cell} julia # Use the above formulas to create the optimal policies for b_{t+1} and c_t -b_pol = G * (inv(I - β * A)) * (A - I) -c_pol = (1 - β) * (G * inv(I - β * A)) +b_pol = G * (inv(I - beta * A)) * (A - I) +c_pol = (1 - beta) * (G * inv(I - beta * A)) # Create the A matrix for a LinearStateSpace instance A_LSS1 = vcat(A, b_pol) @@ -461,12 +460,12 @@ C_LSS = vcat(C, 0) # Create the G matrix for LSS methods G_LSS1 = vcat(G, c_pol) -G_LSS2 = vcat(0, -(1 - β)) +G_LSS2 = vcat(0, -(1 - beta)) G_LSS = hcat(G_LSS1, G_LSS2) # Use the following values to start everyone off at b=0, initial incomes zero -μ_0 = [1.0, 0.0, 0.0, 0.0] -Σ_0 = zeros(4, 4) +mu_0 = [1.0, 0.0, 0.0, 0.0] +Sigma_0 = zeros(4, 4) ``` A_LSS calculated as we have here should equal ABF calculated above using the LQ model @@ -520,7 +519,7 @@ A second graph plots a collection of simulations against the population distrib Comparing sample paths with population distributions at each date $t$ is a useful exercise---see {ref}`our discussion ` of the laws of large numbers. 
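Before making that comparison with the full linear state space system, the logic can be seen in a self-contained sketch: simulate many independent AR(1) paths started from their stationary distribution and compare the cross-sectional average at a terminal date with the population mean of zero. The coefficients `rho` and `sig` below are arbitrary illustrative values, not the model's parameters.

```{code-cell} julia
using Random, Statistics
Random.seed!(42)

rho, sig = 0.9, 0.5                     # illustrative AR(1) coefficients
npaths, T_len = 10_000, 50
stat_sd = sig / sqrt(1 - rho^2)         # stationary standard deviation

x = stat_sd .* randn(npaths)            # cross-section drawn from the stationary distribution
for t in 1:T_len
    x .= rho .* x .+ sig .* randn(npaths)
end

# population mean is 0 at every date; the cross-sectional average is close to it
mean(x), stat_sd / sqrt(npaths)         # sample mean and its approximate standard error
```

The fancharts later in this section make the same kind of comparison, with the moment sequence of the `LSS` instance providing the population benchmark.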
```{code-cell} julia -lss = LSS(A_LSS, C_LSS, G_LSS, mu_0=μ_0, Sigma_0=Σ_0); +lss = LSS(A_LSS, C_LSS, G_LSS, mu_0 = mu_0, Sigma_0 = Sigma_0); ``` ### Population and sample panels @@ -533,9 +532,8 @@ In the code below, we use the [LSS](https://github.com/QuantEcon/QuantEcon.jl/bl graph as the population distribution ```{code-cell} julia -function income_consumption_debt_series(A, C, G, μ_0, Σ_0, T = 150, npaths = 25) - - lss = LSS(A, C, G, mu_0=μ_0, Sigma_0=Σ_0) +function income_consumption_debt_series(A, C, G, mu_0, Sigma_0, T = 150, npaths = 25) + lss = LSS(A, C, G, mu_0 = mu_0, Sigma_0 = Sigma_0) # simulation/Moment Parameters moment_generator = moment_sequence(lss) @@ -546,7 +544,7 @@ function income_consumption_debt_series(A, C, G, μ_0, Σ_0, T = 150, npaths = 2 ysim = zeros(npaths, T) for i in 1:npaths - sims = simulate(lss,T) + sims = simulate(lss, T) bsim[i, :] = sims[1][end, :] csim[i, :] = sims[2][2, :] ysim[i, :] = sims[2][1, :] @@ -557,10 +555,10 @@ function income_consumption_debt_series(A, C, G, μ_0, Σ_0, T = 150, npaths = 2 cons_var = similar(cons_mean) debt_mean = similar(cons_mean) debt_var = similar(cons_mean) - for (idx, t) = enumerate(moment_generator) - (μ_x, μ_y, Σ_x, Σ_y) = t - cons_mean[idx], cons_var[idx] = μ_y[2], Σ_y[2, 2] - debt_mean[idx], debt_var[idx] = μ_x[4], Σ_x[4, 4] + for (idx, t) in enumerate(moment_generator) + (mu_x, mu_y, Sigma_x, Sigma_y) = t + cons_mean[idx], cons_var[idx] = mu_y[2], Sigma_y[2, 2] + debt_mean[idx], debt_var[idx] = mu_x[4], Sigma_x[4, 4] idx == T && break end return bsim, csim, ysim, cons_mean, cons_var, debt_mean, debt_var @@ -569,31 +567,31 @@ end function consumption_income_debt_figure(bsim, csim, ysim) # get T - T = size(bsim, 2) + T = size(bsim, 2) # create first figure xvals = 1:T # plot consumption and income - plt_1 = plot(csim[1,:], label=L"c", color=:blue, lw=2) - plot!(plt_1, ysim[1, :], label=L"y", color=:green, lw=2) - plot!(plt_1, csim', alpha=0.1, color=:blue, label="") - plot!(plt_1, ysim', alpha=0.1, color=:green, label="") - plot!(plt_1, title="Nonfinancial Income, Consumption, and Debt", - xlabel=L"t", ylabel=L"$y$ and $c$",legend=:bottomright) + plt_1 = plot(csim[1, :], label = L"c", color = :blue, lw = 2) + plot!(plt_1, ysim[1, :], label = L"y", color = :green, lw = 2) + plot!(plt_1, csim', alpha = 0.1, color = :blue, label = "") + plot!(plt_1, ysim', alpha = 0.1, color = :green, label = "") + plot!(plt_1, title = "Nonfinancial Income, Consumption, and Debt", + xlabel = L"t", ylabel = L"$y$ and $c$", legend = :bottomright) # plot debt - plt_2 = plot(bsim[1,: ], label=L"b", color=:red, lw=2) - plot!(plt_2, bsim', alpha=0.1, color=:red,label="") - plot!(plt_2, xlabel=L"t", ylabel="debt",legend=:bottomright) + plt_2 = plot(bsim[1, :], label = L"b", color = :red, lw = 2) + plot!(plt_2, bsim', alpha = 0.1, color = :red, label = "") + plot!(plt_2, xlabel = L"t", ylabel = "debt", legend = :bottomright) - plot(plt_1, plt_2, layout=(2,1), size=(800,600)) + plot(plt_1, plt_2, layout = (2, 1), size = (800, 600)) end function consumption_debt_fanchart(csim, cons_mean, cons_var, bsim, debt_mean, debt_var) # get T - T = size(bsim, 2) + T = size(bsim, 2) # create percentiles of cross-section distributions cmean = mean(cons_mean) @@ -608,28 +606,32 @@ function consumption_debt_fanchart(csim, cons_mean, cons_var, xvals = 1:T # first fanchart - plt_1=plot(xvals, cons_mean, color=:black, lw=2, label="") - plot!(plt_1, xvals, Array(csim'), color=:black, alpha=0.25, label="") - plot!(plt_1, xvals, cons_mean, ribbon=c95, alpha=0.25, 
fillalpha=0.25, color=:blue, label="") - plot!(plt_1, xvals, cons_mean, ribbon=c90, alpha=0.25, fillalpha=0.25, color=:red, label="") - plot!(plt_1, title="Consumption/Debt over time", - ylim=(cmean-15, cmean+15), ylabel="consumption") + plt_1 = plot(xvals, cons_mean, color = :black, lw = 2, label = "") + plot!(plt_1, xvals, Array(csim'), color = :black, alpha = 0.25, label = "") + plot!(plt_1, xvals, cons_mean, ribbon = c95, alpha = 0.25, fillalpha = 0.25, + color = :blue, label = "") + plot!(plt_1, xvals, cons_mean, ribbon = c90, alpha = 0.25, fillalpha = 0.25, + color = :red, label = "") + plot!(plt_1, title = "Consumption/Debt over time", + ylim = (cmean - 15, cmean + 15), ylabel = "consumption") # second fanchart - plt_2=plot(xvals, debt_mean, color=:black, lw=2, label="") - plot!(plt_2, xvals, Array(bsim'), color=:black, alpha=0.25, label="") - plot!(plt_2, xvals, debt_mean, ribbon=d95, alpha=0.25, fillalpha=0.25, color=:blue, label="") - plot!(plt_2, xvals, debt_mean, ribbon=d90, alpha=0.25, fillalpha=0.25, color=:red, label="") - plot!(plt_2, ylabel="debt", xlabel=L"t") - - plot(plt_1, plt_2, layout=(2,1), size=(800,600)) + plt_2 = plot(xvals, debt_mean, color = :black, lw = 2, label = "") + plot!(plt_2, xvals, Array(bsim'), color = :black, alpha = 0.25, label = "") + plot!(plt_2, xvals, debt_mean, ribbon = d95, alpha = 0.25, fillalpha = 0.25, + color = :blue, label = "") + plot!(plt_2, xvals, debt_mean, ribbon = d90, alpha = 0.25, fillalpha = 0.25, + color = :red, label = "") + plot!(plt_2, ylabel = "debt", xlabel = L"t") + + plot(plt_1, plt_2, layout = (2, 1), size = (800, 600)) end ``` Now let's create figures with initial conditions of zero for $y_0$ and $b_0$ ```{code-cell} julia -out = income_consumption_debt_series(A_LSS, C_LSS, G_LSS, μ_0, Σ_0) +out = income_consumption_debt_series(A_LSS, C_LSS, G_LSS, mu_0, Sigma_0) bsim0, csim0, ysim0 = out[1:3] cons_mean0, cons_var0, debt_mean0, debt_var0 = out[4:end] @@ -703,9 +705,9 @@ By altering initial conditions, we shall remove this transient in our second exa ```{code-cell} julia function cointegration_figure(bsim, csim) # create figure - plot((1 - β) * bsim[1, :] + csim[1, :], color=:black,lw=2,label="") - plot!((1 - β) * bsim' + csim', color=:black, alpha=.1,label="") - plot!(title="Cointegration of Assets and Consumption", xlabel=L"t") + plot((1 - beta) * bsim[1, :] + csim[1, :], color = :black, lw = 2, label = "") + plot!((1 - beta) * bsim' + csim', color = :black, alpha = 0.1, label = "") + plot!(title = "Cointegration of Assets and Consumption", xlabel = L"t") end cointegration_figure(bsim0, csim0) diff --git a/lectures/dynamic_programming/robustness.md b/lectures/dynamic_programming/robustness.md index d0b5e292..38ed5ab9 100644 --- a/lectures/dynamic_programming/robustness.md +++ b/lectures/dynamic_programming/robustness.md @@ -935,53 +935,52 @@ using Test ```{code-cell} julia using QuantEcon, Plots, LinearAlgebra, Interpolations - # model parameters a_0 = 100 a_1 = 0.5 -ρ = 0.9 -σ_d = 0.05 -β = 0.95 +rho = 0.9 +sigma_d = 0.05 +beta = 0.95 c = 2 -γ = 50.0 -θ = 0.002 +gamma = 50.0 +theta = 0.002 ac = (a_0 - c) / 2.0 # Define LQ matrices -R = [ 0 ac 0; - ac -a_1 0.5; - 0. 0.5 0] +R = [0 ac 0; + ac -a_1 0.5; + 0.0 0.5 0] R = -R # For minimization -Q = Matrix([γ / 2.0]') -A = [1. 0. 0.; - 0. 1. 0.; - 0. 0. ρ] -B = [0. 1. 0.]' -C = [0. 0. 
σ_d]' +Q = Matrix([gamma / 2.0]') +A = [1.0 0.0 0.0; + 0.0 1.0 0.0; + 0.0 0.0 rho] +B = [0.0 1.0 0.0]' +C = [0.0 0.0 sigma_d]' ## Functions -function evaluate_policy(θ, F) - rlq = RBLQ(Q, R, A, B, C, β, θ) +function evaluate_policy(theta, F) + rlq = RBLQ(Q, R, A, B, C, beta, theta) K_F, P_F, d_F, O_F, o_F = evaluate_F(rlq, F) x0 = [1.0 0.0 0.0]' - value = - x0' * P_F * x0 .- d_F + value = -x0' * P_F * x0 .- d_F entropy = x0' * O_F * x0 .+ o_F return value[1], entropy[1] # return scalars end function value_and_entropy(emax, F, bw, grid_size = 1000) if lowercase(bw) == "worst" - θs = 1 ./ range(1e-8, 1000, length = grid_size) + thetas = 1 ./ range(1e-8, 1000, length = grid_size) else - θs = -1 ./ range(1e-8, 1000, length = grid_size) + thetas = -1 ./ range(1e-8, 1000, length = grid_size) end data = zeros(grid_size, 2) - for (i, θ) in enumerate(θs) - data[i, :] = collect(evaluate_policy(θ, F)) - if data[i, 2] ≥ emax # stop at this entropy level + for (i, theta) in enumerate(thetas) + data[i, :] = collect(evaluate_policy(theta, F)) + if data[i, 2] >= emax # stop at this entropy level data = data[1:i, :] break end @@ -992,18 +991,18 @@ end ## Main # compute optimal rule -optimal_lq = QuantEcon.LQ(Q, R, A, B, C, zero(B'A), bet=β) +optimal_lq = QuantEcon.LQ(Q, R, A, B, C, zero(B'A), bet = beta) Po, Fo, Do = stationary_values(optimal_lq) -# compute robust rule for our θ -baseline_robust = RBLQ(Q, R, A, B, C, β, θ) +# compute robust rule for our theta +baseline_robust = RBLQ(Q, R, A, B, C, beta, theta) Fb, Kb, Pb = robust_rule(baseline_robust) # Check the positive definiteness of worst-case covariance matrix to -# ensure that θ exceeds the breakdown point -test_matrix = I - (C' * Pb * C ./ θ)[1] +# ensure that theta exceeds the breakdown point +test_matrix = I - (C' * Pb * C ./ theta)[1] eigenvals, eigenvecs = eigen(test_matrix) -@assert all(x -> x ≥ 0, eigenvals) +@assert all(x -> x >= 0, eigenvals) emax = 1.6e6 @@ -1017,7 +1016,7 @@ robust_worst_case = value_and_entropy(emax, Fb, "worst") data_pairs = ((optimal_best_case, optimal_worst_case), (robust_best_case, robust_worst_case)) -egrid = range(0, emax, length = 100) +egrid = range(0, emax, length = 100) egrid_data = [] for data_pair in data_pairs for data in data_pair @@ -1026,12 +1025,12 @@ for data_pair in data_pairs push!(egrid_data, curve.(egrid)) end end -plot(egrid, egrid_data, color=[:red :red :blue :blue]) -plot!(egrid, egrid_data[1], fillrange=egrid_data[2], - fillcolor=:red, fillalpha=0.1, color=:red, legend=:none) -plot!(egrid, egrid_data[3], fillrange=egrid_data[4], - fillcolor=:blue, fillalpha=0.1, color=:blue, legend=:none) -plot!(xlabel="Entropy", ylabel="Value") +plot(egrid, egrid_data, color = [:red :red :blue :blue]) +plot!(egrid, egrid_data[1], fillrange = egrid_data[2], + fillcolor = :red, fillalpha = 0.1, color = :red, legend = :none) +plot!(egrid, egrid_data[3], fillrange = egrid_data[4], + fillcolor = :blue, fillalpha = 0.1, color = :blue, legend = :none) +plot!(xlabel = "Entropy", ylabel = "Value") ``` ```{code-cell} julia diff --git a/lectures/dynamic_programming/smoothing.md b/lectures/dynamic_programming/smoothing.md index 8e552093..da554958 100644 --- a/lectures/dynamic_programming/smoothing.md +++ b/lectures/dynamic_programming/smoothing.md @@ -361,19 +361,20 @@ using Parameters, Plots, QuantEcon, Random ``` ```{code-cell} julia -ConsumptionProblem(β = 0.96, - y = [2.0, 1.5], - b0 = 3.0, - P = [0.8 0.2 - 0.4 0.6]) = (; β ,y,b0,P) +function ConsumptionProblem(beta = 0.96, + y = [2.0, 1.5], + b0 = 3.0, + P = [0.8 0.2 
+ 0.4 0.6]) + (; beta, y, b0, P) +end function consumption_complete(cp) - - (;β, P, y, b0) = cp + (; beta, P, y, b0) = cp y1, y2 = y # extract income levels b1 = b0 # b1 is known to be equal to b0 - Q = β * P # assumed price system + Q = beta * P # assumed price system # Using equation (7) calculate b2 b2 = (y2 - y1 - (Q[1, 1] - Q[2, 1] - 1) * b1) / (Q[1, 2] + 1 - Q[2, 2]) @@ -385,32 +386,31 @@ function consumption_complete(cp) end function consumption_incomplete(cp; N_simul = 150) - - (;β, P, y, b0) = cp + (; beta, P, y, b0) = cp # for the simulation use the MarkovChain type mc = MarkovChain(P) # useful variables y = y - v = inv(I - β * P) * y + v = inv(I - beta * P) * y # simulate state path - s_path = simulate(mc, N_simul, init=1) + s_path = simulate(mc, N_simul, init = 1) # store consumption and debt path b_path, c_path = ones(N_simul + 1), ones(N_simul) b_path[1] = b0 # optimal decisions from (12) and (13) - db = ((1 - β) * v - y) / β + db = ((1 - beta) * v - y) / beta for (i, s) in enumerate(s_path) - c_path[i] = (1 - β) * (v[s, 1] - b_path[i]) + c_path[i] = (1 - beta) * (v[s, 1] - b_path[i]) b_path[i + 1] = b_path[i] + db[s, 1] end - return c_path, b_path[1:end - 1], y[s_path], s_path + return c_path, b_path[1:(end - 1)], y[s_path], s_path end ``` @@ -420,7 +420,7 @@ Let's test by checking that $\bar c$ and $b_2$ satisfy the budget constraint cp = ConsumptionProblem() c̄, b1, b2 = consumption_complete(cp) debt_complete = [b1, b2] -isapprox((c̄ + b2 - cp.y[2] - debt_complete' * (cp.β * cp.P)[2, :])[1], 0) +isapprox((c̄ + b2 - cp.y[2] - debt_complete' * (cp.beta * cp.P)[2, :])[1], 0) ``` ```{code-cell} julia @@ -429,7 +429,7 @@ tags: [remove-cell] --- @testset "Budget Constraint Tests" begin # assert that the object above is always true - @test isapprox((c̄ + b2 - cp.y[2] - debt_complete' * (cp.β * cp.P)[2, :])[1], 0) + @test isapprox((c̄ + b2 - cp.y[2] - debt_complete' * (cp.beta * cp.P)[2, :])[1], 0) @test c̄ ≈ 1.7241558441558444 # default example invariance @test debt_complete[2] ≈ 2.188311688311688 # one of the other objects end @@ -588,21 +588,23 @@ cp = ConsumptionProblem() c̄, b1, b2 = consumption_complete(cp) debt_complete = [b1, b2] -c_path, debt_path, y_path, s_path = consumption_incomplete(cp, N_simul=N_simul) +c_path, debt_path, y_path, s_path = consumption_incomplete(cp, N_simul = N_simul) -plt_cons = plot(title = "Consumption paths", xlabel = "Periods", ylim = [1.4,2.1]) +plt_cons = plot(title = "Consumption paths", xlabel = "Periods", ylim = [1.4, 2.1]) plot!(plt_cons, 1:N_simul, c_path, label = "incomplete market", lw = 2) plot!(plt_cons, 1:N_simul, fill(c̄, N_simul), label = "complete market", lw = 2) -plot!(plt_cons, 1:N_simul, y_path, label = "income", lw = 2, alpha = 0.6, linestyle = :dash) +plot!(plt_cons, 1:N_simul, y_path, label = "income", lw = 2, alpha = 0.6, + linestyle = :dash) plot!(plt_cons, legend = :bottom) plt_debt = plot(title = "Debt paths", xlabel = "Periods") plot!(plt_debt, 1:N_simul, debt_path, label = "incomplete market") plot!(plt_debt, 1:N_simul, debt_complete[s_path], label = "complete market", lw = 2) -plot!(plt_debt, 1:N_simul, y_path, label = "income", lw = 2, alpha = 0.6, linestyle = :dash) +plot!(plt_debt, 1:N_simul, y_path, label = "income", lw = 2, alpha = 0.6, + linestyle = :dash) plot!(plt_debt, legend = :bottomleft) -plot(plt_cons, plt_debt, layout = (1,2), size = (800, 400)) +plot(plt_cons, plt_debt, layout = (1, 2), size = (800, 400)) ``` ```{code-cell} julia @@ -630,19 +632,22 @@ income $y_t$, notice that We can simply relabel 
variables to acquire tax-smoothing interpretations of our two models ```{code-cell} julia -plt_tax = plot(title = "Tax collection paths", x_label = "Periods", ylim = [1.4,2.1]) +plt_tax = plot(title = "Tax collection paths", x_label = "Periods", + ylim = [1.4, 2.1]) plot!(plt_tax, 1:N_simul, c_path, label = "incomplete market", lw = 2) plot!(plt_tax, 1:N_simul, fill(c̄, N_simul), label = "complete market", lw = 2) -plot!(plt_tax, 1:N_simul, y_path, label = "govt expenditures", alpha = .6, linestyle = :dash, +plot!(plt_tax, 1:N_simul, y_path, label = "govt expenditures", alpha = 0.6, + linestyle = :dash, lw = 2) plt_gov = plot(title = "Government assets paths", x_label = "Periods") plot!(plt_gov, 1:N_simul, debt_path, label = "incomplete market", lw = 2) plot!(plt_gov, 1:N_simul, debt_complete[s_path], label = "complete market", lw = 2) -plot!(plt_gov, 1:N_simul, y_path, label = "govt expenditures", alpha = .6, linestyle = :dash, +plot!(plt_gov, 1:N_simul, y_path, label = "govt expenditures", alpha = 0.6, + linestyle = :dash, lw = 2) hline!(plt_gov, [0], linestyle = :dash, color = :black, lw = 2, label = "") -plot(plt_tax, plt_gov, layout = (1,2), size = (800, 400)) +plot(plt_tax, plt_gov, layout = (1, 2), size = (800, 400)) ``` ```{code-cell} julia @@ -693,14 +698,14 @@ Here's our code to compute a quantitative example with zero debt in peace time: ```{code-cell} julia # Parameters -β = .96 +beta = 0.96 y = [1.0, 2.0] b0 = 0.0 P = [0.8 0.2; 0.4 0.6] -cp = ConsumptionProblem(β, y, b0, P) -Q = β * P +cp = ConsumptionProblem(beta, y, b0, P) +Q = beta * P N_simul = 150 c̄, b1, b2 = consumption_complete(cp) @@ -733,7 +738,7 @@ println("T+b in war = $TB2") println("\n") println("Total government spending in peace and war") -G1= y[1] + AS1 +G1 = y[1] + AS1 G2 = y[2] + AS2 println("total govt spending in peace = $G1") println("total govt spending in war = $G2") @@ -919,7 +924,7 @@ Random.seed!(42); ``` ```{code-cell} julia -function complete_ss(β, b0, x0, A, C, S_y, T = 12) +function complete_ss(beta, b0, x0, A, C, S_y, T = 12) # Create a linear state space for simulation purposes # This adds "b" as a state to the linear state space system @@ -930,21 +935,21 @@ function complete_ss(β, b0, x0, A, C, S_y, T = 12) # Ctilde = vcat(C, zeros(1, 1)) # S_ytilde = hcat(S_y, zeros(1, 1)) - lss = LSS(A, C, S_y, mu_0=x0) + lss = LSS(A, C, S_y, mu_0 = x0) # Add extra state to initial condition # x0 = hcat(x0, 0) - # Compute the (I - β*A)^{-1} - rm = inv(I - β * A) + # Compute the (I - beta*A)^{-1} + rm = inv(I - beta * A) # Constant level of consumption - cbar = (1 - β) * (S_y * rm * x0 .- b0) + cbar = (1 - beta) * (S_y * rm * x0 .- b0) c_hist = ones(T) * cbar[1] # Debt x_hist, y_hist = simulate(lss, T) - b_hist = (S_y * rm * x_hist .- cbar[1] / (1.0 - β)) + b_hist = (S_y * rm * x_hist .- cbar[1] / (1.0 - beta)) return c_hist, vec(b_hist), vec(y_hist), x_hist end @@ -952,20 +957,20 @@ end N_simul = 150 # Define parameters -α, ρ1, ρ2 = 10.0, 0.9, 0.0 -σ = 1.0 +alpha, rho1, rho2 = 10.0, 0.9, 0.0 +sigma = 1.0 # N_simul = 1 # T = N_simul A = [1.0 0.0 0.0; - α ρ1 ρ2; + alpha rho1 rho2; 0.0 1.0 0.0] -C = [0.0, σ, 0.0] +C = [0.0, sigma, 0.0] S_y = [1.0 1.0 0.0] -β, b0 = 0.95, -10.0 -x0 = [1.0, α / (1 - ρ1), α / (1 - ρ1)] +beta, b0 = 0.95, -10.0 +x0 = [1.0, alpha / (1 - rho1), alpha / (1 - rho1)] # Do simulation for complete markets -out = complete_ss(β, b0, x0, A, C, S_y, 150) +out = complete_ss(beta, b0, x0, A, C, S_y, 150) c_hist_com, b_hist_com, y_hist_com, x_hist_com = out # Consumption plots @@ -980,7 +985,7 @@ 
plot!(plt_debt, 1:N_simul, b_hist_com, label = "debt", lw = 2) plot!(plt_debt, 1:N_simul, y_hist_com, label = "Income", lw = 2, alpha = 0.6, linestyle = :dash) hline!(plt_debt, [0], color = :black, linestyle = :dash, lw = 2, label = "") -plot(plt_cons, plt_debt, layout = (1,2), size = (800, 400)) +plot(plt_cons, plt_debt, layout = (1, 2), size = (800, 400)) plot!(legend = :bottomleft) ``` diff --git a/lectures/dynamic_programming/wald_friedman.md b/lectures/dynamic_programming/wald_friedman.md index 852babe9..ac9cadf7 100644 --- a/lectures/dynamic_programming/wald_friedman.md +++ b/lectures/dynamic_programming/wald_friedman.md @@ -181,13 +181,14 @@ using StatsPlots begin base_dist = [Beta(1, 1), Beta(3, 3)] - mixed_dist = MixtureModel.(Ref(base_dist), (p -> [p, one(p) - p]).(0.25:0.25:0.75)) - plot(plot(base_dist, labels = [L"f_0" L"f_1"], title = "Original Distributions"), + mixed_dist = MixtureModel.(Ref(base_dist), + (p -> [p, one(p) - p]).(0.25:0.25:0.75)) + plot(plot(base_dist, labels = [L"f_0" L"f_1"], + title = "Original Distributions"), plot(mixed_dist, labels = [L"1/4-3/4" L"1/2-1/2" L"3/4-1/4"], title = "Distribution Mixtures"), # Global settings across both plots - ylab = "Density", ylim = (0, 2), layout = (2, 1) - ) + ylab = "Density", ylim = (0, 2), layout = (2, 1)) end ``` @@ -389,7 +390,9 @@ Here's the code ```{code-cell} julia accept_x0(p, L0) = (one(p) - p) * L0 accept_x1(p, L1) = p * L1 -bayes_update(p, d0, d1) = p * pdf(d0, p) / pdf(MixtureModel([d0, d1], [p, one(p) - p]), p) +function bayes_update(p, d0, d1) + p * pdf(d0, p) / pdf(MixtureModel([d0, d1], [p, one(p) - p]), p) +end function draw_again(p, d0, d1, L0, L1, c, target) candidate = 0.0 cost = 0.0 @@ -423,7 +426,7 @@ function choice(p, d0, d1, L0, L1, c) end ``` -Next we solve a problem by finding the α, β values for the decision rule +Next we solve a problem by finding the alpha, beta values for the decision rule ```{code-cell} julia function decision_rule(d0, d1, L0, L1, c) @@ -442,27 +445,27 @@ function decision_rule(d0, d1, L0, L1, c) # Compute the choice at both sides left = first.(choice.(roots .- eps(), d0, d1, L0, L1, c)) right = first.(choice.(roots .+ eps(), d0, d1, L0, L1, c)) - # Find β by checking for a permanent transition from the area accepting to + # Find beta by checking for a permanent transition from the area accepting to # x₁ to never again accepting x₁ at the various indifference points - # Find α by checking for a permanent transition from the area accepting of + # Find alpha by checking for a permanent transition from the area accepting of # x₀ to never again accepting x₀ at the various indifference points - β = findlast((left .== 2) .& (right .≠ 2)) |> (x -> isa(x, Int) ? roots[x] : 0) - α = findfirst((left .≠ 1) .& (right .== 1)) |> (x -> isa(x, Int) ? roots[x] : 1) - if β < α - @printf("Accept x1 if p ≤ %.2f\nContinue to draw if %.2f ≤ p ≤ %.2f - \nAccept x0 if p ≥ %.2f", β, β, α, α) + beta = findlast((left .== 2) .& (right .≠ 2)) |> (x -> isa(x, Int) ? roots[x] : 0) + alpha = findfirst((left .≠ 1) .& (right .== 1)) |> (x -> isa(x, Int) ? 
roots[x] : 1) + if beta < alpha + @printf("Accept x1 if p <= %.2f\nContinue to draw if %.2f <= p <= %.2f + \nAccept x0 if p >= %.2f", beta, beta, alpha, alpha) else - x0 = accept_x0(β, L0) - x1 = accept_x1(β, L1) - draw = draw_again(β, d0, d1, L0, L1, c, min(x0, x1)) + x0 = accept_x0(beta, L0) + x1 = accept_x1(beta, L1) + draw = draw_again(beta, d0, d1, L0, L1, c, min(x0, x1)) if draw == min(x0, x1, draw) - @printf("Accept x1 if p ≤ %.2f\nContinue to draw if %.2f ≤ p ≤ %.2f - \nAccept x0 if p ≥ %.2f", β, β, α, α) + @printf("Accept x1 if p <= %.2f\nContinue to draw if %.2f <= p <= %.2f + \nAccept x0 if p >= %.2f", beta, beta, alpha, alpha) else - @printf("Accept x1 if p ≤ %.2f\nAccept x0 if p ≥ %.2f", β, α) + @printf("Accept x1 if p <= %.2f\nAccept x0 if p >= %.2f", beta, alpha) end end - return (α, β) + return (alpha, beta) end ``` @@ -470,8 +473,8 @@ We can simulate an agent facing a problem and the outcome with the following fun ```{code-cell} julia function simulation(problem) - (;d0, d1, L0, L1, c, p, n, return_output) = problem - α, β = decision_rule(d0, d1, L0, L1, c) + (; d0, d1, L0, L1, c, p, n, return_output) = problem + alpha, beta = decision_rule(d0, d1, L0, L1, c) outcomes = fill(false, n) costs = fill(0.0, n) trials = fill(0, n) @@ -487,9 +490,9 @@ function simulation(problem) t += 1 outcome = rand(d) p = bayes_update(p, d0, d1) - if p <= β + if p <= beta choice = 1 - elseif p >= α + elseif p >= alpha choice = 2 end end @@ -501,13 +504,15 @@ function simulation(problem) end @printf("\nCorrect: %.2f\nAverage Cost: %.2f\nAverage number of trials: %.2f", mean(outcomes), mean(costs), mean(trials)) - return return_output ? (α, β, outcomes, costs, trials) : nothing + return return_output ? (alpha, beta, outcomes, costs, trials) : nothing end -Problem(d0 = Beta(1,1), d1 = Beta(9,9), - L0 = 2, L1 = 2, - c = 0.2, p = 0.5, - n = 100, return_output = false) = (;d0,d1,L0,L1,c,p,n,return_output ); +function Problem(d0 = Beta(1, 1), d1 = Beta(9, 9), + L0 = 2, L1 = 2, + c = 0.2, p = 0.5, + n = 100, return_output = false) + (; d0, d1, L0, L1, c, p, n, return_output) +end; ``` ```{code-cell} julia @@ -517,13 +522,13 @@ tags: [remove-cell] @testset "Verifying Output" begin Random.seed!(0) (;d0, d1, L0, L1, c) = Problem() - α, β, outcomes, costs, trials = simulation(Problem(return_output = true)) - #test α ≈ 0.57428237 - #test β ≈ 0.352510338 - choices = first.(choice.((clamp(β - eps(), 0, 1), - clamp(β + eps(), 0, 1), - clamp(α - eps(), 0, 1), - clamp(α + eps(), 0, 1)), + alpha, beta, outcomes, costs, trials = simulation(Problem(return_output = true)) + #test alpha ≈ 0.57428237 + #test beta ≈ 0.352510338 + choices = first.(choice.((clamp(beta - eps(), 0, 1), + clamp(beta + eps(), 0, 1), + clamp(alpha - eps(), 0, 1), + clamp(alpha + eps(), 0, 1)), d0, d1, L0, L1, c)) #test choices[1] == 2 #test choices[2] ≠ 2 @@ -544,16 +549,16 @@ tags: [remove-cell] @testset "Comparative Statics" begin Random.seed!(0) (;d0, d1, L0, L1, c) = Problem() - α, β, outcomes, costs, trials = simulation(Problem(c = 2c, return_output = true)) - #test α ≈ 0.53551172 atol = 1e-3 - #test β ≈ 0.41244737 atol = 1e-3 + alpha, beta, outcomes, costs, trials = simulation(Problem(c = 2c, return_output = true)) + #test alpha ≈ 0.53551172 atol = 1e-3 + #test beta ≈ 0.41244737 atol = 1e-3 #test mean(outcomes) ≈ 0.39 atol = 1e-2 #test mean(costs) ≈ 1.696 atol = 1e-3 #test mean(trials) ≈ 1.19 atol = 1e-3 - choices = first.(choice.((clamp(β - eps(), 0, 1), - clamp(β + eps(), 0, 1), - clamp(α - eps(), 0, 1), - clamp(α + eps(), 0, 1)), + 
choices = first.(choice.((clamp(beta - eps(), 0, 1), + clamp(beta + eps(), 0, 1), + clamp(alpha - eps(), 0, 1), + clamp(alpha + eps(), 0, 1)), d0, d1, L0, L1, 2c)) #test choices[1] == 2 #test choices[2] ≠ 2 From a8f757f4ae96a07fc9289b3136318ee4a09b2af5 Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Wed, 30 Aug 2023 11:32:47 -0400 Subject: [PATCH 03/19] formatting_changes --- format_myst.jl | 3 +- lectures/dynamic_programming/career.md | 8 +- .../coleman_policy_iter.md | 44 ++++---- lectures/dynamic_programming/discrete_dp.md | 9 +- .../dynamic_programming/egm_policy_iter.md | 100 +++++++++-------- lectures/dynamic_programming/ifp.md | 22 ++-- lectures/dynamic_programming/jv.md | 24 ++--- lectures/dynamic_programming/lqcontrol.md | 24 ++--- lectures/dynamic_programming/mccall_model.md | 93 ++++++++-------- .../mccall_model_with_separation.md | 86 +++++++-------- lectures/dynamic_programming/odu.md | 102 +++++++++--------- lectures/dynamic_programming/optgrowth.md | 11 +- .../dynamic_programming/perm_income_cons.md | 6 +- lectures/dynamic_programming/smoothing.md | 38 +++---- lectures/dynamic_programming/wald_friedman.md | 12 ++- 15 files changed, 301 insertions(+), 281 deletions(-) diff --git a/format_myst.jl b/format_myst.jl index f5850b33..0e24e1dd 100644 --- a/format_myst.jl +++ b/format_myst.jl @@ -57,7 +57,8 @@ function format_myst(input_file_path, output_file_path, extra_replacements = fal # Additional replacements are optional. This may be useful when replacing variable names to make it easier to type in ascii replacements = Dict("α" => "alpha", "β" => "beta", "γ" => "gamma", "≤" => "<=", "≥" => ">=", "Σ" => "Sigma", "σ" => "sigma","μ"=>"mu","ϕ"=>"phi","ψ"=>"psi","ϵ"=>"epsilon", - "δ"=>"delta","θ" => "theta","ζ"=>"zeta","X̄" => "X_bar","p̄" => "p_bar","x̂" => "x_hat","λ"=>"lambda","ρ"=>"rho") + "δ"=>"delta","θ" => "theta","ζ"=>"zeta","X̄" => "X_bar","p̄" => "p_bar","x̂" => "x_hat","λ"=>"lambda", + "ρ"=>"rho","u′" => "u_prime" , "f′"=>"f_prime"," ∂u∂c"=>"dudc","Π"=>"Pi","π"=>"pi"," ξ"=>"Xi","c̄"=>"c_bar","w̄"=>"w_bar") # Replace the code blocks in the content and handle exceptions try diff --git a/lectures/dynamic_programming/career.md b/lectures/dynamic_programming/career.md index 339ca17c..4c9daab6 100644 --- a/lectures/dynamic_programming/career.md +++ b/lectures/dynamic_programming/career.md @@ -134,7 +134,7 @@ $$ Interpretation: -* draw $q$ from a beta distribution with shape parameters $(a, b)$ +* draw $q$ from a $\beta$ distribution with shape parameters $(a, b)$ * run $n$ independent binary trials, each with success probability $q$ * $p(k \,|\, n, a, b)$ is the probability of $k$ successes in these $n$ trials @@ -196,9 +196,9 @@ function CareerWorkerProblem(; beta = 0.95, G_probs = pdf.(dist_G, support(dist_G)) F_mean = sum(theta .* F_probs) G_mean = sum(epsilon .* G_probs) - return (beta = beta, N = N, B = B, theta = theta, epsilon = epsilon, - F_probs = F_probs, G_probs = G_probs, - F_mean = F_mean, G_mean = G_mean) + return (; beta, N, B, theta, epsilon, + F_probs, G_probs, + F_mean, G_mean) end function update_bellman!(cp, v, out; ret_policy = false) diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index 3fd1968c..a338c2ea 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -377,7 +377,7 @@ Here's some code that implements the Coleman operator. 
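For reference while reading it, at each grid point $y$ the operator returns the consumption level $c = (Kg)(y)$ that solves the Euler condition

$$
u'(c) = \beta \int u'\bigl(g(f(y - c) z)\bigr) \, f'(y - c) \, z \, \phi(dz),
$$

where $\phi$ is the shock distribution. In the code, the integral is approximated by a Monte Carlo average over the `shocks` draws and the root in $c$ is located with `find_zero`.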
tags: [hide-output] --- using LinearAlgebra, Statistics -using BenchmarkTools, Interpolations, LaTeXStrings, Parameters, Plots, Roots +using BenchmarkTools, Interpolations, LaTeXStrings, Plots, Roots using Optim, Random ``` @@ -389,12 +389,12 @@ using Test ``` ```{code-cell} julia -using BenchmarkTools, Interpolations, Parameters, Plots, Roots +using BenchmarkTools, Interpolations, Plots, Roots ``` ```{code-cell} julia -function K!(Kg, g, grid, beta, ∂u∂c, f, f′, shocks) +function K!(Kg, g, grid, beta, dudc, f, f_prime, shocks) # This function requires the container of the output value as argument Kg # Construct linear interpolation object @@ -403,8 +403,8 @@ function K!(Kg, g, grid, beta, ∂u∂c, f, f′, shocks) # solve for updated consumption value for (i, y) in enumerate(grid) function h(c) - vals = ∂u∂c.(g_func.(f(y - c) * shocks)) .* f′(y - c) .* shocks - return ∂u∂c(c) - beta * mean(vals) + vals = dudc.(g_func.(f(y - c) * shocks)) .* f_prime(y - c) .* shocks + returndudc(c) - beta * mean(vals) end Kg[i] = find_zero(h, (1e-10, y - 1e-10)) end @@ -412,7 +412,9 @@ function K!(Kg, g, grid, beta, ∂u∂c, f, f′, shocks) end # The following function does NOT require the container of the output value as argument -K(g, grid, beta, ∂u∂c, f, f′, shocks) = K!(similar(g), g, grid, beta, ∂u∂c, f, f′, shocks) +function K(g, grid, beta, dudc, f, f_prime, shocks) + K!(similar(g), g, grid, beta, dudc, f, f_prime, shocks) +end ``` It has some similarities to the code for the Bellman operator in our {doc}`optimal growth lecture <../dynamic_programming/optgrowth>`. @@ -473,10 +475,12 @@ function Model(alpha = 0.65, # Productivity parameter grid_max = 4.0, # Largest grid point grid_size = 200, # Number of grid points u = (c, gamma = gamma) -> isoelastic(c, gamma), # utility function - ∂u∂c = c -> c^(-gamma), # u′ + dudc = c -> c^(-gamma), # u_prime f = k -> k^alpha, # production function - f′ = k -> alpha * k^(alpha - 1)) - (; alpha, beta, gamma, mu, s, grid, grid_min, grid_max, grid_size, u, ∂u∂c, f, f′) + f_prime = k -> alpha * k^(alpha - 1)) + return (; alpha, beta, gamma, mu, s, grid, grid_min, grid_max, grid_size, u, + dudc, f, + f_prime) end ``` @@ -512,8 +516,8 @@ end ```{code-cell} julia function verify_true_policy(m, shocks, c_star) # compute (Kc_star) - (; grid, beta, ∂u∂c, f, f′) = m - c_star_new = K(c_star, grid, beta, ∂u∂c, f, f′, shocks) + (; grid, beta, dudc, f, f_prime) = m + c_star_new = K(c_star, grid, beta, dudc, f, f_prime, shocks) # plot c_star and Kc_star plot(grid, c_star, label = L"optimal policy $c^*$") @@ -548,11 +552,11 @@ The initial condition we'll use is the one that eats the whole pie: $c(y) = y$ ```{code-cell} julia function check_convergence(m, shocks, c_star, g_init; n_iter = 15) - (; grid, beta, ∂u∂c, f, f′) = m + (; grid, beta, dudc, f, f_prime) = m g = g_init plot(m.grid, g, lw = 2, alpha = 0.6, label = L"intial condition $c(y) = y$") for i in 1:n_iter - new_g = K(g, grid, beta, ∂u∂c, f, f′, shocks) + new_g = K(g, grid, beta, dudc, f, f_prime, shocks) g = new_g plot!(grid, g, lw = 2, alpha = 0.6, label = "") end @@ -594,12 +598,12 @@ function iterate_updating(func, arg_init; sim_length = 20) end function compare_error(m, shocks, g_init, w_init; sim_length = 20) - (; grid, beta, u, ∂u∂c, f, f′) = m + (; grid, beta, u, dudc, f, f_prime) = m g, w = g_init, w_init # two functions for simplification bellman_single_arg(w) = T(w, grid, beta, u, f, shocks) - coleman_single_arg(g) = K(g, grid, beta, ∂u∂c, f, f′, shocks) + coleman_single_arg(g) = K(g, grid, beta, dudc, f, f_prime, 
shocks) g = iterate_updating(coleman_single_arg, grid, sim_length = 20) w = iterate_updating(bellman_single_arg, u.(grid), sim_length = 20) @@ -741,12 +745,12 @@ m_ex = Model(gamma = 1.5); ```{code-cell} julia function exercise2(m, shocks, g_init = m.grid, w_init = m.u.(m.grid); sim_length = 20) - (; grid, beta, u, ∂u∂c, f, f′) = m + (; grid, beta, u, dudc, f, f_prime) = m # initial policy and value g, w = g_init, w_init # iteration bellman_single_arg(w) = T(w, grid, beta, u, f, shocks) - coleman_single_arg(g) = K(g, grid, beta, ∂u∂c, f, f′, shocks) + coleman_single_arg(g) = K(g, grid, beta, dudc, f, f_prime, shocks) g = iterate_updating(coleman_single_arg, grid, sim_length = 20) w = iterate_updating(bellman_single_arg, u.(m.grid), sim_length = 20) @@ -772,13 +776,13 @@ It assumes that you've just run the code from the previous exercise ```{code-cell} julia function bellman(m, shocks) - (; grid, beta, u, ∂u∂c, f, f′) = m + (; grid, beta, u, dudc, f, f_prime) = m bellman_single_arg(w) = T(w, grid, beta, u, f, shocks) iterate_updating(bellman_single_arg, u.(grid), sim_length = 20) end function coleman(m, shocks) - (; grid, beta, ∂u∂c, f, f′) = m - coleman_single_arg(g) = K(g, grid, beta, ∂u∂c, f, f′, shocks) + (; grid, beta, dudc, f, f_prime) = m + coleman_single_arg(g) = K(g, grid, beta, dudc, f, f_prime, shocks) iterate_updating(coleman_single_arg, grid, sim_length = 20) end ``` diff --git a/lectures/dynamic_programming/discrete_dp.md b/lectures/dynamic_programming/discrete_dp.md index faf2ee3f..5f98c1ee 100644 --- a/lectures/dynamic_programming/discrete_dp.md +++ b/lectures/dynamic_programming/discrete_dp.md @@ -427,7 +427,7 @@ using Test ``` ```{code-cell} julia -using BenchmarkTools, Plots, QuantEcon, Parameters +using BenchmarkTools, Plots, QuantEcon ``` @@ -450,7 +450,7 @@ function transition_matrices(g) end end - return (Q = Q, R = R) + return (; Q, R) end ``` @@ -500,7 +500,7 @@ function verbose_matrices(g) @assert sum(Q[s + 1, a + 1, :]) ≈ 1 #Optional check that matrix is stochastic end end - return (Q = Q, R = R) + return (;Q, R ) end ``` @@ -822,7 +822,8 @@ solution of the original continuous model. Here's the exact solution: c = f(grid) - grid[sigma] ab = alpha * beta -c1 = (log(1 - alpha * beta) + log(alpha * beta) * alpha * beta / (1 - alpha * beta)) / (1 - beta) +c1 = (log(1 - alpha * beta) + log(alpha * beta) * alpha * beta / (1 - alpha * beta)) / + (1 - beta) c2 = alpha / (1 - alpha * beta) v_star(k) = c1 + c2 * log(k) diff --git a/lectures/dynamic_programming/egm_policy_iter.md b/lectures/dynamic_programming/egm_policy_iter.md index 55a679d6..6036bf7e 100644 --- a/lectures/dynamic_programming/egm_policy_iter.md +++ b/lectures/dynamic_programming/egm_policy_iter.md @@ -143,7 +143,7 @@ Here's an implementation of $K$ using EGM as described above. 
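As a compact reminder of the recipe the function below follows: for each point $k_i$ on the exogenous capital grid it computes

$$
c_i = (u')^{-1}\!\left( \beta \int u'\bigl(g(f(k_i) z)\bigr)\, f'(k_i)\, z \,\phi(dz) \right),
\qquad
y_i = k_i + c_i,
$$

and then interpolates the pairs $(y_i, c_i)$ to obtain the updated policy, so no root-finding step is required.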
```{code-cell} julia using LinearAlgebra, Statistics -using BenchmarkTools, Interpolations, LaTeXStrings, Parameters, Plots, Random, Roots +using BenchmarkTools, Interpolations, LaTeXStrings, Plots, Random, Roots ``` @@ -155,22 +155,22 @@ using Test ``` ```{code-cell} julia -function coleman_egm(g, k_grid, β, u′, u′_inv, f, f′, shocks) +function coleman_egm(g, k_grid, beta, u_prime, u_prime_inv, f, f_prime, shocks) # Allocate memory for value of consumption on endogenous grid points c = similar(k_grid) # Solve for updated consumption value for (i, k) in enumerate(k_grid) - vals = u′.(g.(f(k) * shocks)) .* f′(k) .* shocks - c[i] = u′_inv(β * mean(vals)) + vals = u_prime.(g.(f(k) * shocks)) .* f_prime(k) .* shocks + c[i] = u_prime_inv(beta * mean(vals)) end # Determine endogenous grid y = k_grid + c # y_i = k_i + c_i # Update policy function and return - Kg = LinearInterpolation(y,c, extrapolation_bc=Line()) + Kg = LinearInterpolation(y, c, extrapolation_bc = Line()) Kg_f(x) = Kg(x) return Kg_f end @@ -181,7 +181,7 @@ Note the lack of any root finding algorithm. We'll also run our original implementation, which uses an exogenous grid and requires root finding, so we can perform some comparisons ```{code-cell} julia -function K!(Kg, g, grid, β, u′, f, f′, shocks) +function K!(Kg, g, grid, beta, u_prime, f, f_prime, shocks) # This function requires the container of the output value as argument Kg @@ -191,8 +191,8 @@ function K!(Kg, g, grid, β, u′, f, f′, shocks) # solve for updated consumption value # for (i, y) in enumerate(grid) function h(c) - vals = u′.(g_func.(f(y - c) * shocks)) .* f′(y - c) .* shocks - return u′(c) - β * mean(vals) + vals = u_prime.(g_func.(f(y - c) * shocks)) .* f_prime(y - c) .* shocks + return u_prime(c) - beta * mean(vals) end Kg[i] = find_zero(h, (1e-10, y - 1e-10)) end @@ -200,8 +200,9 @@ function K!(Kg, g, grid, β, u′, f, f′, shocks) end # The following function does NOT require the container of the output value as argument -K(g, grid, β, u′, f, f′, shocks) = - K!(similar(g), g, grid, β, u′, f, f′, shocks) +function K(g, grid, beta, u_prime, f, f_prime, shocks) + K!(similar(g), g, grid, beta, u_prime, f, f_prime, shocks) +end ``` Let's test out the code above on some example parameterizations, after the following imports. @@ -215,20 +216,22 @@ The first step is to bring in the model that we used in the {doc}`Coleman policy ```{code-cell} julia # model -Model(α = 0.65, # productivity parameter - β = 0.95, # discount factor - γ = 1.0, # risk aversion - μ = 0.0, # lognorm(μ, σ) - s = 0.1, # lognorm(μ, σ) - grid_min = 1e-6, # smallest grid point - grid_max = 4.0, # largest grid point - grid_size = 200, # grid size - u = γ == 1 ? log : c->(c^(1-γ)-1)/(1-γ), # utility function - u′ = c-> c^(-γ), # u' - f = k-> k^α, # production function - f′ = k -> α*k^(α-1), # f' - grid = range(grid_min, grid_max, length = grid_size)) # grid - = (;α,β ,γ,μ ,s ,grid_min,grid_max ,grid_size ,u,u′ ,f ,f′,grid ) +function Model(alpha = 0.65, # productivity parameter + beta = 0.95, # discount factor + gamma = 1.0, # risk aversion + mu = 0.0, # lognorm(mu, sigma) + s = 0.1, # lognorm(mu, sigma) + grid_min = 1e-6, # smallest grid point + grid_max = 4.0, # largest grid point + grid_size = 200, # grid size + u = gamma == 1 ? 
log : c -> (c^(1 - gamma) - 1) / (1 - gamma), # utility function + u_prime = c -> c^(-gamma), # u' + f = k -> k^alpha, # production function + f_prime = k -> alpha * k^(alpha - 1), # f' + grid = range(grid_min, grid_max, length = grid_size)) # grid + return (; alpha, beta, gamma, mu, s, grid_min, grid_max, grid_size, u, u_prime, + f, f_prime, grid) +end ``` Next we generate an instance @@ -243,7 +246,7 @@ We also need some shock draws for Monte Carlo integration Random.seed!(42); # For reproducible behavior. shock_size = 250 # Number of shock draws in Monte Carlo integral -shocks = exp.(mlog.μ .+ mlog.s * randn(shock_size)); +shocks = exp.(mlog.mu .+ mlog.s * randn(shock_size)); ``` ```{code-cell} julia @@ -259,13 +262,13 @@ end As a preliminary test, let's see if $K c^* = c^*$, as implied by the theory ```{code-cell} julia -c_star(y) = (1 - mlog.α * mlog.β) * y +c_star(y) = (1 - mlog.alpha * mlog.beta) * y # some useful constants -ab = mlog.α * mlog.β -c1 = log(1 - ab) / (1 - mlog.β) -c2 = (mlog.μ + mlog.α * log(ab)) / (1 - mlog.α) -c3 = 1 / (1 - mlog.β) +ab = mlog.alpha * mlog.beta +c1 = log(1 - ab) / (1 - mlog.beta) +c2 = (mlog.mu + mlog.alpha * log(ab)) / (1 - mlog.alpha) +c3 = 1 / (1 - mlog.beta) c4 = 1 / (1 - ab) v_star(y) = c1 + c2 * (c3 - c4) + c4 * log(y) @@ -284,7 +287,8 @@ end ```{code-cell} julia function verify_true_policy(m, shocks, c_star) k_grid = m.grid - c_star_new = coleman_egm(c_star, k_grid, m.β, m.u′, m.u′, m.f, m.f′, shocks) + c_star_new = coleman_egm(c_star, k_grid, m.beta, m.u_prime, m.u_prime, m.f, + m.f_prime, shocks) plt = plot() plot!(plt, k_grid, c_star.(k_grid), lw = 2, label = L"optimal policy $c^*$") @@ -304,19 +308,19 @@ tags: [remove-cell] # This should look like a 45-degree line. ``` -Notice that we're passing u′ to coleman_egm twice. +Notice that we're passing u_prime to coleman_egm twice. The reason is that, in the case of log utility, $u'(c) = (u')^{-1}(c) = 1/c$. -Hence u′ and u′_inv are the same. +Hence u_prime and u_prime_inv are the same. We can't really distinguish the two plots. 
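Before quantifying that, here is a tiny standalone check of the claim above that, with log utility, $u'$ coincides with its own inverse (which is why passing `u_prime` for both arguments is harmless); the sample consumption values are arbitrary.

```{code-cell} julia
uprime(c) = 1 / c                       # log utility: u'(c) = 1/c
cvals = [0.25, 1.0, 2.5, 7.0]           # arbitrary test points
all(c -> uprime(uprime(c)) ≈ c, cvals)  # applying u' twice returns c, so the inverse of u' is u' itself
```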
In fact it's easy to see that the difference is essentially zero: ```{code-cell} julia -c_star_new = coleman_egm(c_star, mlog.grid, mlog.β, mlog.u′, - mlog.u′, mlog.f, mlog.f′, shocks) +c_star_new = coleman_egm(c_star, mlog.grid, mlog.beta, mlog.u_prime, + mlog.u_prime, mlog.f, mlog.f_prime, shocks) maximum(abs(c_star_new(g) - c_star(g)) for g in mlog.grid) ``` @@ -344,16 +348,20 @@ function check_convergence(m, shocks, c_star, g_init, n_iter) g = g_init plt = plot() plot!(plt, m.grid, g.(m.grid), - color = RGBA(0,0,0,1), lw = 2, alpha = 0.6, label = L"initial condition $c(y) = y$") + color = RGBA(0, 0, 0, 1), lw = 2, alpha = 0.6, + label = L"initial condition $c(y) = y$") for i in 1:n_iter - new_g = coleman_egm(g, k_grid, m.β, m.u′, m.u′, m.f, m.f′, shocks) + new_g = coleman_egm(g, k_grid, m.beta, m.u_prime, m.u_prime, m.f, m.f_prime, + shocks) g = new_g - plot!(plt, k_grid, new_g.(k_grid), alpha = 0.6, color = RGBA(0,0,(i / n_iter), 1), + plot!(plt, k_grid, new_g.(k_grid), alpha = 0.6, + color = RGBA(0, 0, (i / n_iter), 1), lw = 2, label = "") end plot!(plt, k_grid, c_star.(k_grid), - color = :black, lw = 2, alpha = 0.8, label = L"true policy function $c^*$") + color = :black, lw = 2, alpha = 0.8, + label = L"true policy function $c^*$") plot!(plt, legend = :topleft) end ``` @@ -374,8 +382,8 @@ We'll do so using the CRRA model adopted in the exercises of the {doc}`Euler equ Here's the model and some convenient functions ```{code-cell} julia -mcrra = Model(α = 0.65, β = 0.95, γ = 1.5) -u′_inv(c) = c^(-1 / mcrra.γ) +mcrra = Model(alpha = 0.65, beta = 0.95, gamma = 1.5) +u_prime_inv(c) = c^(-1 / mcrra.gamma) ``` ```{code-cell} julia @@ -384,16 +392,18 @@ tags: [remove-cell] --- @testset "U Prime Tests" begin # test that the behavior of this function is invariant - @test u′_inv(3) ≈ 0.4807498567691362 + @test u_prime_inv(3) ≈ 0.4807498567691362 end ``` Here's the result ```{code-cell} julia -crra_coleman(g, m, shocks) = K(g, m.grid, m.β, m.u′, m.f, m.f′, shocks) -crra_coleman_egm(g, m, shocks) = coleman_egm(g, m.grid, m.β, m.u′, - u′_inv, m.f, m.f′, shocks) +crra_coleman(g, m, shocks) = K(g, m.grid, m.beta, m.u_prime, m.f, m.f_prime, shocks) +function crra_coleman_egm(g, m, shocks) + coleman_egm(g, m.grid, m.beta, m.u_prime, + u_prime_inv, m.f, m.f_prime, shocks) +end function coleman(m = m, shocks = shocks; sim_length = 20) g = m.grid for i in 1:sim_length diff --git a/lectures/dynamic_programming/ifp.md b/lectures/dynamic_programming/ifp.md index c574324f..e8b24307 100644 --- a/lectures/dynamic_programming/ifp.md +++ b/lectures/dynamic_programming/ifp.md @@ -376,7 +376,7 @@ using Test ```{code-cell} julia using LinearAlgebra, Statistics -using BenchmarkTools, LaTeXStrings, Optim, Parameters, Plots, QuantEcon, Random +using BenchmarkTools, LaTeXStrings, Optim, Plots, QuantEcon, Random using Optim: converged, maximum, maximizer, minimizer, iterations ``` @@ -389,7 +389,7 @@ du(x) = 1 / x # model function ConsumerProblem(; r = 0.01, beta = 0.96, - Π = [0.6 0.4; 0.05 0.95], + Pi = [0.6 0.4; 0.05 0.95], z_vals = [0.5, 1.0], b = 0.0, grid_max = 16, @@ -397,14 +397,14 @@ function ConsumerProblem(; r = 0.01, R = 1 + r asset_grid = range(-b, grid_max, length = grid_size) - return (r = r, R = R, beta = beta, b = b, Π = Π, z_vals = z_vals, - asset_grid = asset_grid) + return (; r, R, beta, b, Pi, z_vals, + asset_grid) end function T!(cp, V, out; ret_policy = false) # unpack input, set up arrays - (; R, Π, beta, b, asset_grid, z_vals) = cp + (; R, Pi, beta, b, asset_grid, z_vals) = cp z_idx = 
1:length(z_vals) # value function when the shock index is z_i @@ -416,7 +416,7 @@ function T!(cp, V, out; ret_policy = false) for (i_z, z) in enumerate(z_vals) for (i_a, a) in enumerate(asset_grid) function obj(c) - EV = dot(vf.(R * a + z - c, z_idx), Π[i_z, :]) # compute expectation + EV = dot(vf.(R * a + z - c, z_idx), Pi[i_z, :]) # compute expectation return u(c) + beta * EV end res = maximize(obj, opt_lb, R .* a .+ z .+ b) @@ -429,7 +429,7 @@ function T!(cp, V, out; ret_policy = false) end end end - out + return out end T(cp, V; ret_policy = false) = T!(cp, V, similar(V); ret_policy = ret_policy) @@ -440,7 +440,7 @@ get_greedy(cp, V) = update_bellman(cp, V, ret_policy = true) function K!(cp, c, out) # simplify names, set up arrays - (; R, Π, beta, b, asset_grid, z_vals) = cp + (; R, Pi, beta, b, asset_grid, z_vals) = cp z_idx = 1:length(z_vals) gam = R * beta @@ -454,7 +454,7 @@ function K!(cp, c, out) for (i_a, a) in enumerate(asset_grid) function h(t) cps = cf.(R * a + z - t, z_idx) # c' for each z' - expectation = dot(du.(cps), Π[i_z, :]) + expectation = dot(du.(cps), Pi[i_z, :]) return abs(du(t) - max(gam * expectation, du(R * a + z + b))) end opt_ub = R * a + z + b # addresses issue #8 on github @@ -739,7 +739,7 @@ end ```{code-cell} julia function compute_asset_series(cp, T = 500_000; verbose = false) - (; Π, z_vals, R) = cp # simplify names + (; Pi, z_vals, R) = cp # simplify names z_idx = 1:length(z_vals) v_init, c_init = initialize(cp) c = compute_fixed_point(x -> K(cp, x), c_init, @@ -748,7 +748,7 @@ function compute_asset_series(cp, T = 500_000; verbose = false) cf = interp(cp.asset_grid, c) a = zeros(T + 1) - z_seq = simulate(MarkovChain(Π), T) + z_seq = simulate(MarkovChain(Pi), T) for t in 1:T i_z = z_seq[t] a[t + 1] = R * a[t] + z_vals[i_z] - cf(a[t], i_z) diff --git a/lectures/dynamic_programming/jv.md b/lectures/dynamic_programming/jv.md index 3b0c2671..d4ce301d 100644 --- a/lectures/dynamic_programming/jv.md +++ b/lectures/dynamic_programming/jv.md @@ -44,7 +44,7 @@ kernelspec: tags: [hide-output] --- using LinearAlgebra, Statistics -using Distributions, Interpolations, Expectations, Parameters +using Distributions, Interpolations, Expectations using LaTeXStrings, Plots, NLsolve, Random ``` @@ -191,7 +191,7 @@ function JvWorker(; A = 1.4, grid_size = 50, epsilon = 1e-4) G(x, phi) = A .* (x .* phi) .^ alpha - π_func = sqrt + pi_func = sqrt F = Beta(2, 2) # expectation operator @@ -206,8 +206,8 @@ function JvWorker(; A = 1.4, # CoordInterpGrid below x_grid = range(epsilon, grid_max, length = grid_size) - return (A = A, alpha = alpha, beta = beta, x_grid = x_grid, G = G, - π_func = π_func, F = F, E = E, epsilon = epsilon) + return (; A, alpha, beta, x_grid, G, + pi_func, F, E, epsilon) end function T!(jv, @@ -215,7 +215,7 @@ function T!(jv, new_V::AbstractVector) # simplify notation - (; G, π_func, F, beta, E, epsilon) = jv + (; G, pi_func, F, beta, E, epsilon) = jv # prepare interpoland of value function Vf = LinearInterpolation(jv.x_grid, V, extrapolation_bc = Line()) @@ -232,7 +232,7 @@ function T!(jv, s, phi = z h(u) = Vf(max(G(x, phi), u)) integral = E(h) - q = π_func(s) * integral + (1.0 - π_func(s)) * Vf(G(x, phi)) + q = pi_func(s) * integral + (1.0 - pi_func(s)) * Vf(G(x, phi)) return -x * (1.0 - phi - s) - beta * q end @@ -250,12 +250,10 @@ function T!(jv, end end -function T!(jv, - V, - out::Tuple{AbstractVector, AbstractVector}) +function T!(jv, V, out::Tuple{AbstractVector, AbstractVector}) # simplify notation - (; G, π_func, F, beta, E, epsilon) = jv + (; G, 
pi_func, F, beta, E, epsilon) = jv # prepare interpoland of value function Vf = LinearInterpolation(jv.x_grid, V, extrapolation_bc = Line()) @@ -275,7 +273,7 @@ function T!(jv, s, phi = z h(u) = Vf(max(G(x, phi), u)) integral = E(h) - q = π_func(s) * integral + (1.0 - π_func(s)) * Vf(G(x, phi)) + q = pi_func(s) * integral + (1.0 - pi_func(s)) * Vf(G(x, phi)) return -x * (1.0 - phi - s) - beta * q end @@ -471,7 +469,7 @@ Here's code to produce the 45 degree diagram ```{code-cell} julia wp = JvWorker(grid_size = 25) # simplify notation -(; G, π_func, F) = wp +(; G, pi_func, F) = wp v_init = collect(wp.x_grid) * 0.5 f2(x) = T(wp, x) @@ -509,7 +507,7 @@ xs = [] ys = [] for x in plot_grid for i in 1:K - b = rand() < π_func(s(x)) ? 1 : 0 + b = rand() < pi_func(s(x)) ? 1 : 0 U = rand(wp.F) y = h_func(x, b, U) push!(xs, x) diff --git a/lectures/dynamic_programming/lqcontrol.md b/lectures/dynamic_programming/lqcontrol.md index 75a24175..27c6c161 100644 --- a/lectures/dynamic_programming/lqcontrol.md +++ b/lectures/dynamic_programming/lqcontrol.md @@ -545,7 +545,7 @@ to solve finite and infinite horizon linear quadratic control problems. In the module, the various updating, simulation and fixed point methods act on a type called `LQ`, which includes * Instance data: - * The required parameters $Q, R, A, B$ and optional parameters C, beta, T, R_f, N specifying a given LQ model + * The required parameters $Q, R, A, B$ and optional parameters C, $\beta$, T, R_f, N specifying a given LQ model * set $T$ and $R_f$ to `None` in the infinite horizon case * set `C = None` (or zero) in the deterministic case * the value function and policy data @@ -654,7 +654,7 @@ Random.seed!(42); r = 0.05 beta = 1 / (1 + r) T = 45 -c̄ = 2.0 +c_bar = 2.0 sigma = 0.25 mu = 1.0 q = 1e6 @@ -664,7 +664,7 @@ Q = 1.0 R = zeros(2, 2) Rf = zeros(2, 2); Rf[1, 1] = q; -A = [1+r -c̄+mu; 0 1] +A = [1+r -c_bar+mu; 0 1] B = [-1.0, 0] C = [sigma, 0] @@ -675,7 +675,7 @@ xp, up, wp = compute_sequence(lq, x0) # convert back to assets, consumption and income assets = vec(xp[1, :]) # a_t -c = vec(up .+ c̄) # c_t +c = vec(up .+ c_bar) # c_t income = vec(sigma * wp[1, 2:end] .+ mu) # y_t # plot results @@ -741,7 +741,7 @@ xp, up, wp = compute_sequence(lq, x0) # convert back to assets, consumption and income assets = vec(xp[1, :]) # a_t -c = vec(up .+ c̄) # c_t +c = vec(up .+ c_bar) # c_t income = vec(sigma * wp[1, 2:end] .+ mu) # y_t # plot results @@ -1304,7 +1304,7 @@ $p(t) = m_1 t + m_2 t^2$ has an inverted U shape with r = 0.05 beta = 1 / (1 + r) T = 50 -c̄ = 1.5 +c_bar = 1.5 sigma = 0.15 mu = 2 q = 1e4 @@ -1316,7 +1316,7 @@ Q = 1.0 R = zeros(4, 4) Rf = zeros(4, 4); Rf[1, 1] = q; -A = [1+r -c̄ m1 m2; +A = [1+r -c_bar m1 m2; 0 1 0 0; 0 1 1 0; 0 1 2 1] @@ -1330,7 +1330,7 @@ xp, up, wp = compute_sequence(lq, x0) # convert results back to assets, consumption and income ap = vec(xp[1, 1:end]) # assets -c = vec(up .+ c̄) # consumption +c = vec(up .+ c_bar) # consumption time = 1:T income = sigma * vec(wp[1, 2:end]) + m1 * time + m2 * time .^ 2 # income @@ -1356,7 +1356,7 @@ r = 0.05 beta = 1 / (1 + r) T = 60 K = 40 -c̄ = 4 +c_bar = 4 sigma = 0.35 mu = 4 q = 1e4 @@ -1369,7 +1369,7 @@ Q = 1.0 R = zeros(4, 4) Rf = zeros(4, 4); Rf[1, 1] = q; -A = [1+r s-c̄ 0 0; +A = [1+r s-c_bar 0 0; 0 1 0 0; 0 1 1 0; 0 1 2 1] @@ -1392,7 +1392,7 @@ Rf2 = lq_retired_proxy.P # formulate LQ problem 2 (working life) Q = 1.0 R = zeros(4, 4) -A = [1+r -c̄ m1 m2; +A = [1+r -c_bar m1 m2; 0 1 0 0; 0 1 1 0; 0 1 2 1] @@ -1414,7 +1414,7 @@ xp = [xp_w xp_r[:, 2:end]] assets = 
vec(xp[1, :]) # assets up = [up_w up_r] -c = vec(up .+ c̄) # consumption +c = vec(up .+ c_bar) # consumption time = 1:K income_w = sigma * vec(wp_w[1, 2:(K + 1)]) + m1 .* time + m2 .* time .^ 2 # income diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index e9125c07..5dd618f6 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -292,7 +292,7 @@ tags: [hide-output] --- using LinearAlgebra, Statistics using Distributions, Expectations, LaTeXStrings -using NLsolve, Roots, Random, Plots, Parameters +using NLsolve, Roots, Random, Plots ``` ```{code-cell} julia @@ -312,7 +312,7 @@ Here's the distribution of wage offers we'll work with n = 50 dist = BetaBinomial(n, 200, 100) # probability distribution @show support(dist) -w = range(10.0, 60.0, length = n+1) # linearly space wages +w = range(10.0, 60.0, length = n + 1) # linearly space wages using StatsPlots plt = plot(w, pdf.(dist, support(dist)), xlabel = "wages", @@ -325,7 +325,7 @@ We can explore taking expectations over this distribution E = expectation(dist) # expectation operator # exploring the properties of the operator -wage(i) = w[i+1] # +1 to map from support of 0 +wage(i) = w[i + 1] # +1 to map from support of 0 E_w = E(wage) E_w_2 = E(i -> wage(i)^2) - E_w^2 # variance @show E_w, E_w_2 @@ -346,16 +346,16 @@ Our initial guess $v$ is the value of accepting at every given wage # parameters and constant objects c = 25 -β = 0.99 +beta = 0.99 num_plots = 6 # Operator -T(v) = max.(w/(1 - β), c + β * E*v) # (5) broadcasts over the w, fixes the v -# alternatively, T(v) = [max(wval/(1 - β), c + β * E*v) for wval in w] +T(v) = max.(w / (1 - beta), c + beta * E * v) # (5) broadcasts over the w, fixes the v +# alternatively, T(v) = [max(wval/(1 - beta), c + beta * E*v) for wval in w] # fill in matrix of vs vs = zeros(n + 1, 6) # data to fill -vs[:, 1] .= w / (1-β) # initial guess of "accept all" +vs[:, 1] .= w / (1 - beta) # initial guess of "accept all" # manually applying operator for col in 2:num_plots @@ -369,12 +369,12 @@ One approach to solving the model is to directly implement this sort of iteratio between successive iterates is below tol ```{code-cell} julia -function compute_reservation_wage_direct(params; v_iv = collect(w ./(1-β)), +function compute_reservation_wage_direct(params; v_iv = collect(w ./ (1 - beta)), max_iter = 500, tol = 1e-6) - (;c, β, w) = params + (; c, beta, w) = params # create a closure for the T operator - T(v) = max.(w/(1 - β), c + β * E*v) # (5) fixing the parameter values + T(v) = max.(w / (1 - beta), c + beta * E * v) # (5) fixing the parameter values v = copy(v_iv) # copy to prevent v_iv modification v_next = similar(v) @@ -387,7 +387,7 @@ function compute_reservation_wage_direct(params; v_iv = collect(w ./(1-β)), v .= v_next # copy contents into v end # now compute the reservation wage - return (1 - β) * (c + β * E*v) # (2) + return (1 - beta) * (c + beta * E * v) # (2) end ``` @@ -408,15 +408,15 @@ As usual, we are better off using a package, which may give a better algorithm a In this case, we can use the `fixedpoint` algorithm discussed in {doc}`our Julia by Example lecture <../getting_started_julia/julia_by_example>` to find the fixed point of the $T$ operator. Note that below we set the parameter `m=1` for Anderson iteration rather than leaving as the default value - which fails to converge in this case. 
This is still almost 10x faster than the `m=0` case, which corresponds to naive fixed-point iteration. ```{code-cell} julia -function compute_reservation_wage(params; v_iv = collect(w ./(1-β)), +function compute_reservation_wage(params; v_iv = collect(w ./ (1 - beta)), iterations = 500, ftol = 1e-6, m = 1) - (;c, β, w) = params - T(v) = max.(w/(1 - β), c + β * E*v) # (5) fixing the parameter values + (; c, beta, w) = params + T(v) = max.(w / (1 - beta), c + beta * E * v) # (5) fixing the parameter values sol = fixedpoint(T, v_iv; iterations, ftol, m) # (5) sol.f_converged || error("Failed to converge") v_star = sol.zero - return (1 - β) * (c + β * E*v_star) # (3) + return (1 - beta) * (c + beta * E * v_star) # (3) end ``` @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm (c=25.0, β=0.99, w=w) = (;c, β, w )# named tuples +mcm(c = 25.0, beta = 0.99, w) = (; c, beta, w)# named tuples compute_reservation_wage(mcm()) # call with default parameters ``` @@ -455,11 +455,11 @@ grid_size = 25 R = zeros(grid_size, grid_size) c_vals = range(10.0, 30.0, length = grid_size) -β_vals = range(0.9, 0.99, length = grid_size) +beta_vals = range(0.9, 0.99, length = grid_size) for (i, c) in enumerate(c_vals) - for (j, β) in enumerate(β_vals) - R[i, j] = compute_reservation_wage(mcm(;c, β);m=0) + for (j, beta) in enumerate(beta_vals) + R[i, j] = compute_reservation_wage(mcm(; c, beta); m = 0) end end ``` @@ -480,12 +480,12 @@ tags: [remove-cell] @test R[4, 4] ≈ 41.15851842606614 # arbitrary reservation wage. @test grid_size == 25 # grid invariance. @test length(c_vals) == grid_size && c_vals[1] ≈ 10.0 && c_vals[end] ≈ 30.0 - @test length(β_vals) == grid_size && β_vals[1] ≈ 0.9 && β_vals[end] ≈ 0.99 + @test length(beta_vals) == grid_size && beta_vals[1] ≈ 0.9 && beta_vals[end] ≈ 0.99 end ``` ```{code-cell} julia -contour(c_vals, β_vals, R', +contour(c_vals, beta_vals, R', title = "Reservation Wage", xlabel = L"c", ylabel = L"\beta", @@ -574,16 +574,16 @@ The big difference here, however, is that we're iterating on a single number, ra Here's an implementation: ```{code-cell} julia -function compute_reservation_wage_ψ(c, β; ψ_iv = E * w ./ (1 - β), - iterations = 500, ftol = 1e-5, m = 1) - T_ψ(ψ) = [c + β * E*max.((w ./ (1 - β)), ψ[1])] # (7) +function compute_reservation_wage_psi(c, beta; psi_iv = E * w ./ (1 - beta), + iterations = 500, ftol = 1e-5, m = 1) + T_psi(psi) = [c + beta * E * max.((w ./ (1 - beta)), psi[1])] # (7) # using vectors since fixedpoint doesn't support scalar - sol = fixedpoint(T_ψ, [ψ_iv]; iterations, ftol, m) - sol.f_converged || error("Failed to converge") - ψ_star = sol.zero[1] - return (1 - β) * ψ_star # (2) + sol = fixedpoint(T_psi, [psi_iv]; iterations, ftol, m) + sol.f_converged || error("Failed to converge") + psi_star = sol.zero[1] + return (1 - beta) * psi_star # (2) end -compute_reservation_wage_ψ(c, β) +compute_reservation_wage_psi(c, beta) ``` You can use this code to solve the exercise below. @@ -591,13 +591,13 @@ You can use this code to solve the exercise below. 
Another option is to solve for the root of the $T_{\psi}(\psi) - \psi$ equation ```{code-cell} julia -function compute_reservation_wage_ψ2(c, β; ψ_iv = E * w ./ (1 - β), - maxiters = 500, rtol = 1e-5) - root_ψ(ψ) = c + β * E*max.((w ./ (1 - β)), ψ) - ψ # (7) - ψ_star = find_zero(root_ψ, ψ_iv;maxiters, rtol) - return (1 - β) * ψ_star # (2) +function compute_reservation_wage_psi2(c, beta; psi_iv = E * w ./ (1 - beta), + maxiters = 500, rtol = 1e-5) + root_psi(psi) = c + beta * E * max.((w ./ (1 - beta)), psi) - psi # (7) + psi_star = find_zero(root_psi, psi_iv; maxiters, rtol) + return (1 - beta) * psi_star # (2) end -compute_reservation_wage_ψ2(c, β) +compute_reservation_wage_psi2(c, beta) ``` ```{code-cell} julia @@ -607,8 +607,8 @@ tags: [remove-cell] @testset begin mcmp = mcm() @test compute_reservation_wage(mcmp) ≈ 47.316499766546215 - @test compute_reservation_wage_ψ(mcmp.c, mcmp.β) ≈ 47.31649976654623 - @test compute_reservation_wage_ψ2(mcmp.c, mcmp.β) ≈ 47.31649976654623 + @test compute_reservation_wage_psi(mcmp.c, mcmp.beta) ≈ 47.31649976654623 + @test compute_reservation_wage_psi2(mcmp.c, mcmp.beta) ≈ 47.31649976654623 end ``` @@ -635,16 +635,16 @@ Plot mean unemployment duration as a function of $c$ in `c_vals`. Here's one solution ```{code-cell} julia -function compute_stopping_time(w̄; seed=1234) +function compute_stopping_time(w_bar; seed = 1234) Random.seed!(seed) stopping_time = 0 t = 1 # make sure the constraint is sometimes binding - @assert length(w) - 1 ∈ support(dist) && w̄ <= w[end] + @assert length(w) - 1 ∈ support(dist) && w_bar <= w[end] while true # Generate a wage draw w_val = w[rand(dist)] # the wage dist set up earlier - if w_val ≥ w̄ + if w_val >= w_bar stopping_time = t break else @@ -654,16 +654,17 @@ function compute_stopping_time(w̄; seed=1234) return stopping_time end -compute_mean_stopping_time(w̄, num_reps=10000) = mean(i -> - compute_stopping_time(w̄, - seed = i), 1:num_reps) -c_vals = range(10, 40, length = 25) +function compute_mean_stopping_time(w_bar, num_reps = 10000) + mean(i -> compute_stopping_time(w_bar, + seed = i), 1:num_reps) +end +c_vals = range(10, 40, length = 25) stop_times = similar(c_vals) beta = 0.99 for (i, c) in enumerate(c_vals) - w̄ = compute_reservation_wage_ψ(c, beta) - stop_times[i] = compute_mean_stopping_time(w̄) + w_bar = compute_reservation_wage_psi(c, beta) + stop_times[i] = compute_mean_stopping_time(w_bar) end plot(c_vals, stop_times, label = "mean unemployment duration", diff --git a/lectures/dynamic_programming/mccall_model_with_separation.md b/lectures/dynamic_programming/mccall_model_with_separation.md index 5751bb94..6b399314 100644 --- a/lectures/dynamic_programming/mccall_model_with_separation.md +++ b/lectures/dynamic_programming/mccall_model_with_separation.md @@ -47,7 +47,7 @@ Once separation enters the picture, the agent comes to view tags: [hide-output] --- using LinearAlgebra, Statistics -using Distributions, Expectations, LaTeXStrings, Parameters, NLsolve, Plots +using Distributions, Expectations, LaTeXStrings,NLsolve, Plots ``` ## The Model @@ -233,44 +233,45 @@ using Test ``` ```{code-cell} julia -using Distributions, LinearAlgebra, Expectations, Parameters, NLsolve, Plots +using Distributions, LinearAlgebra, Expectations, NLsolve, Plots function solve_mccall_model(mcm; U_iv = 1.0, V_iv = ones(length(mcm.w)), tol = 1e-5, iter = 2_000) - (; α, β, σ, c, γ, w, dist, u) = mcm + (; alpha, beta, sigma, c, gamma, w, dist, u) = mcm # parameter validation @assert c > 0.0 @assert minimum(w) > 0.0 # perhaps not 
strictly necessary, but useful here # necessary objects - u_w = u.(w, σ) - u_c = u(c, σ) + u_w = u.(w, sigma) + u_c = u(c, sigma) E = expectation(dist) # expectation operator for wage distribution # Bellman operator T. Fixed point is x* s.t. T(x*) = x* function T(x) - V = x[1:end-1] + V = x[1:(end - 1)] U = x[end] - [u_w + β * ((1 - α) * V .+ α * U); u_c + β * (1 - γ) * U + β * γ * E * max.(U, V)] + [u_w + beta * ((1 - alpha) * V .+ alpha * U); + u_c + beta * (1 - gamma) * U + beta * gamma * E * max.(U, V)] end # value function iteration x_iv = [V_iv; U_iv] # initial x val xstar = fixedpoint(T, x_iv, iterations = iter, xtol = tol, m = 0).zero - V = xstar[1:end-1] + V = xstar[1:(end - 1)] U = xstar[end] # compute the reservation wage wbarindex = searchsortedfirst(V .- U, 0.0) if wbarindex >= length(w) # if this is true, you never want to accept - w̄ = Inf + w_bar = Inf else - w̄ = w[wbarindex] # otherwise, return the number + w_bar = w[wbarindex] # otherwise, return the number end # return a NamedTuple, so we can select values by name - return (V = V, U = U, w̄ = w̄) + return (; V, U, w_bar) end ``` @@ -284,29 +285,29 @@ We'll use the default parameterizations found in the code above. ```{code-cell} julia # a default utility function -u(c, σ) = (c^(1 - σ) - 1) / (1 - σ) +u(c, sigma) = (c^(1 - sigma) - 1) / (1 - sigma) # model constructor -McCallModel(α = 0.2, - β = 0.98, # discount rate - γ = 0.7, - c = 6.0, # unemployment compensation - σ = 2.0, - u = u, # utility function - w = range(10, 20, length = 60), # wage values - dist = BetaBinomial(59, 600, 400)) # distribution over wage values - = (;α, β , γ ,c , σ,u , w , dist) +function McCallModel(alpha = 0.2, + beta = 0.98, # discount rate + gamma = 0.7, + c = 6.0, # unemployment compensation + sigma = 2.0, + u = u, # utility function + w = range(10, 20, length = 60), # wage values + dist = BetaBinomial(59, 600, 400)) # distribution over wage values + return (; alpha, beta, gamma, c, sigma, u, w, dist) +end ``` ```{code-cell} julia # plots setting - mcm = McCallModel() (; V, U) = solve_mccall_model(mcm) U_vec = fill(U, length(mcm.w)) -plot(mcm.w, [V U_vec], lw = 2, α = 0.7, label = [L"V" L"U"]) +plot(mcm.w, [V U_vec], lw = 2, alpha = 0.7, label = [L"V" L"U"]) ``` ```{code-cell} julia @@ -407,7 +408,7 @@ Plot the reservation wage against the job offer rate $\gamma$. Use ```{code-cell} julia -γ_vals = range(0.05, 0.95, length = 25) +gamma_vals = range(0.05, 0.95, length = 25) ``` Interpret your results. @@ -421,25 +422,25 @@ we can create an array for reservation wages for different values of $c$, $\beta$ and $\alpha$ and plot the results like so ```{code-cell} julia -c_vals = range(2, 12, length = 25) +c_vals = range(2, 12, length = 25) models = [McCallModel(c = cval) for cval in c_vals] sols = solve_mccall_model.(models) -w̄_vals = [sol.w̄ for sol in sols] +w_bar_vals = [sol.w_bar for sol in sols] plot(c_vals, - w̄_vals, - lw = 2, - α = 0.7, - xlabel = "unemployment compensation", - ylabel = "reservation wage", - label = L"$\overline{w}$ as a function of $c$") + w_bar_vals, + lw = 2, + alpha = 0.7, + xlabel = "unemployment compensation", + ylabel = "reservation wage", + label = L"$\overline{w}$ as a function of $c$") ``` Note that we could've done the above in one pass (which would be important if, for example, the parameter space was quite large). 
```{code-cell} julia -w̄_vals = [solve_mccall_model(McCallModel(c = cval)).w̄ for cval in c_vals]; +w_bar_vals = [solve_mccall_model(McCallModel(c = cval)).w_bar for cval in c_vals]; # doesn't allocate new arrays for models and solutions ``` @@ -448,9 +449,9 @@ w̄_vals = [solve_mccall_model(McCallModel(c = cval)).w̄ for cval in c_vals]; tags: [remove-cell] --- @testset "Solutions 1 Tests" begin - #test w̄_vals[10] ≈ 11.35593220338983 + #test w_bar_vals[10] ≈ 11.35593220338983 @test c_vals[1] == 2 && c_vals[end] == 12 && length(c_vals) == 25 - #test w̄_vals[17] - c_vals[17] ≈ 4.72316384180791 + #test w_bar_vals[17] - c_vals[17] ≈ 4.72316384180791 # Just a sanity check on how these things relate. end ``` @@ -460,14 +461,15 @@ end Similar to above, we can plot $\bar w$ against $\gamma$ as follows ```{code-cell} julia -γ_vals = range(0.05, 0.95, length = 25) +gamma_vals = range(0.05, 0.95, length = 25) -models = [McCallModel(γ = γval) for γval in γ_vals] +models = [McCallModel(gamma = gammaval) for gammaval in gamma_vals] sols = solve_mccall_model.(models) -w̄_vals = [sol.w̄ for sol in sols] +w_bar_vals = [sol.w_bar for sol in sols] -plot(γ_vals, w̄_vals, lw = 2, α = 0.7, xlabel = "job offer rate", - ylabel = "reservation wage", label = L"$\overline{w}$ as a function of $\gamma$") +plot(gamma_vals, w_bar_vals, lw = 2, alpha = 0.7, xlabel = "job offer rate", + ylabel = "reservation wage", + label = L"$\overline{w}$ as a function of $\gamma$") ``` ```{code-cell} julia @@ -475,8 +477,8 @@ plot(γ_vals, w̄_vals, lw = 2, α = 0.7, xlabel = "job offer rate", tags: [remove-cell] --- @testset "Solutions 2 Tests" begin - #test w̄_vals[17] ≈ 11.35593220338983 # same as w̄_vals[10] before. - @test γ_vals[1] ≈ 0.05 && γ_vals[end] ≈ 0.95 && length(γ_vals) == 25 + #test w_bar_vals[17] ≈ 11.35593220338983 # same as w_bar_vals[10] before. + @test gamma_vals[1] ≈ 0.05 && gamma_vals[end] ≈ 0.95 && length(gamma_vals) == 25 end ``` diff --git a/lectures/dynamic_programming/odu.md b/lectures/dynamic_programming/odu.md index 14c8aaf6..51d5b400 100644 --- a/lectures/dynamic_programming/odu.md +++ b/lectures/dynamic_programming/odu.md @@ -161,7 +161,7 @@ using Test # At the head of every lecture. ```{code-cell} julia using LinearAlgebra, Statistics -using Distributions, LaTeXStrings, Plots, QuantEcon, Interpolations, Parameters +using Distributions, LaTeXStrings, Plots, QuantEcon, Interpolations w_max = 2 x = range(0, w_max, length = 200) @@ -212,7 +212,7 @@ The code is as follows. 
# use key word argment function SearchProblem(; beta = 0.95, c = 0.6, F_a = 1, F_b = 1, G_a = 3, G_b = 1.2, w_max = 2.0, - w_grid_size = 40, π_grid_size = 40) + w_grid_size = 40, pi_grid_size = 40) F = Beta(F_a, F_b) G = Beta(G_a, G_b) @@ -220,26 +220,26 @@ function SearchProblem(; beta = 0.95, c = 0.6, F_a = 1, F_b = 1, f(x) = pdf.(F, x / w_max) / w_max g(x) = pdf.(G, x / w_max) / w_max - π_min = 1e-3 # avoids instability - π_max = 1 - π_min + pi_min = 1e-3 # avoids instability + pi_max = 1 - pi_min w_grid = range(0, w_max, length = w_grid_size) - π_grid = range(π_min, π_max, length = π_grid_size) + pi_grid = range(pi_min, pi_max, length = pi_grid_size) nodes, weights = qnwlege(21, 0.0, w_max) - return (beta = beta, c = c, F = F, G = G, f = f, - g = g, n_w = w_grid_size, w_max = w_max, - w_grid = w_grid, n_π = π_grid_size, π_min = π_min, - π_max = π_max, π_grid = π_grid, quad_nodes = nodes, + return (; beta, c, F, G, f, + g, n_w = w_grid_size, w_max, + w_grid, n_pi = pi_grid_size, pi_min, + pi_max, pi_grid = pi_grid, quad_nodes = nodes, quad_weights = weights) end -function q(sp, w, π_val) - new_π = 1.0 / (1 + ((1 - π_val) * sp.g(w)) / (π_val * sp.f(w))) +function q(sp, w, pi_val) + new_pi = 1.0 / (1 + ((1 - pi_val) * sp.g(w)) / (pi_val * sp.f(w))) - # Return new_π when in [π_min, π_max] and else end points - return clamp(new_π, sp.π_min, sp.π_max) + # Return new_pi when in [pi_min, pi_max] and else end points + return clamp(new_pi, sp.pi_min, sp.pi_max) end function T!(sp, v, out; @@ -248,7 +248,7 @@ function T!(sp, v, out; (; f, g, beta, c) = sp nodes, weights = sp.quad_nodes, sp.quad_weights - vf = extrapolate(interpolate((sp.w_grid, sp.π_grid), v, + vf = extrapolate(interpolate((sp.w_grid, sp.pi_grid), v, Gridded(Linear())), Flat()) # set up quadrature nodes/weights @@ -258,18 +258,18 @@ function T!(sp, v, out; # calculate v1 v1 = w / (1 - beta) - for (π_j, _π) in enumerate(sp.π_grid) + for (pi_j, _pi) in enumerate(sp.pi_grid) # calculate v2 function integrand(m) - [vf(m[i], q.(Ref(sp), m[i], _π)) * - (_π * f(m[i]) + (1 - _π) * g(m[i])) for i in 1:length(m)] + [vf(m[i], q.(Ref(sp), m[i], _pi)) * + (_pi * f(m[i]) + (1 - _pi) * g(m[i])) for i in 1:length(m)] end integral = do_quad(integrand, nodes, weights) # integral = do_quad(integrand, q_nodes, q_weights) v2 = c + beta * integral # return policy if asked for, otherwise return max of values - out[w_i, π_j] = ret_policy ? v1 > v2 : max(v1, v2) + out[w_i, pi_j] = ret_policy ? v1 > v2 : max(v1, v2) end end return out @@ -278,7 +278,7 @@ end function T(sp, v; ret_policy = false) out_type = ret_policy ? 
Bool : Float64 - out = zeros(out_type, sp.n_w, sp.n_π) + out = zeros(out_type, sp.n_w, sp.n_pi) T!(sp, v, out, ret_policy = ret_policy) end @@ -290,15 +290,15 @@ function res_wage_operator!(sp, phi, out) # simplify name (; f, g, beta, c) = sp - # Construct interpolator over π_grid, given phi - phi_f = LinearInterpolation(sp.π_grid, phi, extrapolation_bc = Line()) + # Construct interpolator over pi_grid, given phi + phi_f = LinearInterpolation(sp.pi_grid, phi, extrapolation_bc = Line()) # set up quadrature nodes/weights q_nodes, q_weights = qnwlege(7, 0.0, sp.w_max) - for (i, _π) in enumerate(sp.π_grid) + for (i, _pi) in enumerate(sp.pi_grid) function integrand(x) - max.(x, phi_f.(q.(Ref(sp), x, _π))) .* (_π * f(x) + (1 - _π) * g(x)) + max.(x, phi_f.(q.(Ref(sp), x, _pi))) .* (_pi * f(x) + (1 - _pi) * g(x)) end integral = do_quad(integrand, q_nodes, q_weights) out[i] = (1 - beta) * c + beta * integral @@ -327,25 +327,25 @@ Here's the value function: ```{code-cell} julia # Set up the problem and initial guess, solve by VFI -sp = SearchProblem(; w_grid_size = 100, π_grid_size = 100) -v_init = fill(sp.c / (1 - sp.beta), sp.n_w, sp.n_π) +sp = SearchProblem(; w_grid_size = 100, pi_grid_size = 100) +v_init = fill(sp.c / (1 - sp.beta), sp.n_w, sp.n_pi) f(x) = T(sp, x) v = compute_fixed_point(f, v_init) policy = get_greedy(sp, v) # Make functions for the linear interpolants of these -vf = extrapolate(interpolate((sp.w_grid, sp.π_grid), v, Gridded(Linear())), +vf = extrapolate(interpolate((sp.w_grid, sp.pi_grid), v, Gridded(Linear())), Flat()) -pf = extrapolate(interpolate((sp.w_grid, sp.π_grid), policy, +pf = extrapolate(interpolate((sp.w_grid, sp.pi_grid), policy, Gridded(Linear())), Flat()) function plot_value_function(; w_plot_grid_size = 100, - π_plot_grid_size = 100) - π_plot_grid = range(0.001, 0.99, length = π_plot_grid_size) + pi_plot_grid_size = 100) + pi_plot_grid = range(0.001, 0.99, length = pi_plot_grid_size) w_plot_grid = range(0, sp.w_max, length = w_plot_grid_size) - Z = [vf(w_plot_grid[j], π_plot_grid[i]) - for j in 1:w_plot_grid_size, i in 1:π_plot_grid_size] - p = contour(π_plot_grid, w_plot_grid, Z, levels = 15, alpha = 0.6, + Z = [vf(w_plot_grid[j], pi_plot_grid[i]) + for j in 1:w_plot_grid_size, i in 1:pi_plot_grid_size] + p = contour(pi_plot_grid, w_plot_grid, Z, levels = 15, alpha = 0.6, fill = true, size = (400, 400), c = :lightrainbow) plot!(xlabel = L"\pi", ylabel = L"w", xguidefont = font(12)) return p @@ -359,12 +359,12 @@ The optimal policy: ```{code-cell} julia function plot_policy_function(; w_plot_grid_size = 100, - π_plot_grid_size = 100) - π_plot_grid = range(0.001, 0.99, length = π_plot_grid_size) + pi_plot_grid_size = 100) + pi_plot_grid = range(0.001, 0.99, length = pi_plot_grid_size) w_plot_grid = range(0, sp.w_max, length = w_plot_grid_size) - Z = [pf(w_plot_grid[j], π_plot_grid[i]) - for j in 1:w_plot_grid_size, i in 1:π_plot_grid_size] - p = contour(π_plot_grid, w_plot_grid, Z, levels = 1, alpha = 0.6, fill = true, + Z = [pf(w_plot_grid[j], pi_plot_grid[i]) + for j in 1:w_plot_grid_size, i in 1:pi_plot_grid_size] + p = contour(pi_plot_grid, w_plot_grid, Z, levels = 1, alpha = 0.6, fill = true, size = (400, 400), c = :coolwarm) plot!(xlabel = L"\pi", ylabel = "wage", xguidefont = font(12), cbar = false) annotate!(0.4, 1.0, "reject") @@ -555,15 +555,15 @@ time is much shorter than that of the value function approach in `examples/odu_vfi_plots.jl`. 
```{code-cell} julia -sp = SearchProblem(π_grid_size = 50) +sp = SearchProblem(pi_grid_size = 50) -phi_init = ones(sp.n_π) +phi_init = ones(sp.n_pi) f_ex1(x) = res_wage_operator(sp, x) -w̄ = compute_fixed_point(f_ex1, phi_init) +w_bar = compute_fixed_point(f_ex1, phi_init) -plot(sp.π_grid, w̄, linewidth = 2, color = :black, +plot(sp.pi_grid, w_bar, linewidth = 2, color = :black, fillrange = 0, fillalpha = 0.15, fillcolor = :blue) -plot!(sp.π_grid, 2 * ones(length(w̄)), linewidth = 0, fillrange = w̄, +plot!(sp.pi_grid, 2 * ones(length(w_bar)), linewidth = 0, fillrange = w_bar, fillalpha = 0.12, fillcolor = :green, legend = :none) plot!(ylims = (0, 2), annotations = [(0.42, 1.2, "reject"), (0.7, 1.8, "accept")]) @@ -585,29 +585,29 @@ The code takes a few minutes to run. using Random Random.seed!(42) -# Set up model and compute the function w̄ -sp = SearchProblem(π_grid_size = 50, F_a = 1, F_b = 1) -phi_init = ones(sp.n_π) +# Set up model and compute the function w_bar +sp = SearchProblem(pi_grid_size = 50, F_a = 1, F_b = 1) +phi_init = ones(sp.n_pi) g(x) = res_wage_operator(sp, x) -w̄_vals = compute_fixed_point(g, phi_init) -w̄ = extrapolate(interpolate((sp.π_grid,), w̄_vals, +w_bar_vals = compute_fixed_point(g, phi_init) +w_bar = extrapolate(interpolate((sp.pi_grid,), w_bar_vals, Gridded(Linear())), Flat()) # Holds the employment state and beliefs of an individual agent. mutable struct Agent{TF <: AbstractFloat, TI <: Integer} - _π::TF + _pi::TF employed::TI end -Agent(_π = 1e-3) = Agent(_π, 1) +Agent(_pi = 1e-3) = Agent(_pi, 1) function update!(ag, H) if ag.employed == 0 w = rand(H) * 2 # account for scale in julia - if w >= w̄(ag._π) + if w >= w_bar(ag._pi) ag.employed = 1 else - ag._π = 1.0 ./ (1 .+ ((1 - ag._π) .* sp.g(w)) ./ (ag._π * sp.f(w))) + ag._pi = 1.0 ./ (1 .+ ((1 - ag._pi) .* sp.g(w)) ./ (ag._pi * sp.f(w))) end end nothing diff --git a/lectures/dynamic_programming/optgrowth.md b/lectures/dynamic_programming/optgrowth.md index c958d731..f1297b75 100644 --- a/lectures/dynamic_programming/optgrowth.md +++ b/lectures/dynamic_programming/optgrowth.md @@ -464,7 +464,7 @@ The next figure illustrates piecewise linear interpolation of an arbitrary funct tags: [hide-output] --- using LinearAlgebra, Statistics -using LaTeXStrings, Plots, Interpolations, NLsolve, Optim, Random, Parameters +using LaTeXStrings, Plots, Interpolations, NLsolve, Optim, Random using Optim: maximum, maximizer ``` @@ -500,14 +500,15 @@ Here's a function that implements the Bellman operator using linear interpolatio ```{code-cell} julia function T(w; p, tol = 1e-10) - (; beta, u, f, ξ, y) = p # unpack parameters + (; beta, u, f, Xi, y) = p # unpack parameters w_func = LinearInterpolation(y, w) Tw = similar(w) sigma = similar(w) for (i, y_val) in enumerate(y) # solve maximization for each point in y, using y itself as initial condition. 
- results = maximize(c -> u(c; p) + beta * mean(w_func.(f(y_val - c; p) .* ξ)), + results = maximize(c -> u(c; p) + + beta * mean(w_func.(f(y_val - c; p) .* Xi)), tol, y_val) Tw[i] = maximum(results) sigma[i] = maximizer(results) @@ -571,8 +572,8 @@ function # named tuples defaults OptimalGrowthModel(alpha = 0.4, beta = 0.96, mu = 0.0, s = 0.1, u = u, f = f, # defaults defined above y = range(1e-5, 4.0, length = 200), # grid on y - ξ = exp.(mu .+ s * randn(250))) - (; alpha, beta, mu, s, u, f, y, ξ) + Xi = exp.(mu .+ s * randn(250))) + (; alpha, beta, mu, s, u, f, y, Xi) end # named tuples defaults # True value and policy function diff --git a/lectures/dynamic_programming/perm_income_cons.md b/lectures/dynamic_programming/perm_income_cons.md index 21a0dc7d..64d05460 100644 --- a/lectures/dynamic_programming/perm_income_cons.md +++ b/lectures/dynamic_programming/perm_income_cons.md @@ -532,9 +532,9 @@ In the code below, we use the [LSS](https://github.com/QuantEcon/QuantEcon.jl/bl graph as the population distribution ```{code-cell} julia -function income_consumption_debt_series(A, C, G, mu_0, Sigma_0, T = 150, npaths = 25) - lss = LSS(A, C, G, mu_0 = mu_0, Sigma_0 = Sigma_0) - +function income_consumption_debt_series(A, C, G, mu_0, Sigma_0, T = 150, + npaths = 25) + lss = LSS(A, C < G; mu_0, Sigma_0) # simulation/Moment Parameters moment_generator = moment_sequence(lss) diff --git a/lectures/dynamic_programming/smoothing.md b/lectures/dynamic_programming/smoothing.md index da554958..a6b006dd 100644 --- a/lectures/dynamic_programming/smoothing.md +++ b/lectures/dynamic_programming/smoothing.md @@ -356,7 +356,7 @@ using Test ```{code-cell} julia using LinearAlgebra, Statistics -using Parameters, Plots, QuantEcon, Random +using Plots, QuantEcon, Random ``` @@ -379,10 +379,10 @@ function consumption_complete(cp) # Using equation (7) calculate b2 b2 = (y2 - y1 - (Q[1, 1] - Q[2, 1] - 1) * b1) / (Q[1, 2] + 1 - Q[2, 2]) - # Using equation (5) calculae c̄ - c̄ = y1 - b0 + ([b1 b2] * Q[1, :])[1] + # Using equation (5) calculae c_bar + c_bar = y1 - b0 + ([b1 b2] * Q[1, :])[1] - return c̄, b1, b2 + return c_bar, b1, b2 end function consumption_incomplete(cp; N_simul = 150) @@ -418,9 +418,9 @@ Let's test by checking that $\bar c$ and $b_2$ satisfy the budget constraint ```{code-cell} julia cp = ConsumptionProblem() -c̄, b1, b2 = consumption_complete(cp) +c_bar, b1, b2 = consumption_complete(cp) debt_complete = [b1, b2] -isapprox((c̄ + b2 - cp.y[2] - debt_complete' * (cp.beta * cp.P)[2, :])[1], 0) +isapprox((c_bar + b2 - cp.y[2] - debt_complete' * (cp.beta * cp.P)[2, :])[1], 0) ``` ```{code-cell} julia @@ -429,8 +429,8 @@ tags: [remove-cell] --- @testset "Budget Constraint Tests" begin # assert that the object above is always true - @test isapprox((c̄ + b2 - cp.y[2] - debt_complete' * (cp.beta * cp.P)[2, :])[1], 0) - @test c̄ ≈ 1.7241558441558444 # default example invariance + @test isapprox((c_bar + b2 - cp.y[2] - debt_complete' * (cp.beta * cp.P)[2, :])[1], 0) + @test c_bar ≈ 1.7241558441558444 # default example invariance @test debt_complete[2] ≈ 2.188311688311688 # one of the other objects end ``` @@ -585,14 +585,14 @@ Random.seed!(42) N_simul = 150 cp = ConsumptionProblem() -c̄, b1, b2 = consumption_complete(cp) +c_bar, b1, b2 = consumption_complete(cp) debt_complete = [b1, b2] c_path, debt_path, y_path, s_path = consumption_incomplete(cp, N_simul = N_simul) plt_cons = plot(title = "Consumption paths", xlabel = "Periods", ylim = [1.4, 2.1]) plot!(plt_cons, 1:N_simul, c_path, label = "incomplete 
market", lw = 2) -plot!(plt_cons, 1:N_simul, fill(c̄, N_simul), label = "complete market", lw = 2) +plot!(plt_cons, 1:N_simul, fill(c_bar, N_simul), label = "complete market", lw = 2) plot!(plt_cons, 1:N_simul, y_path, label = "income", lw = 2, alpha = 0.6, linestyle = :dash) plot!(plt_cons, legend = :bottom) @@ -635,7 +635,7 @@ We can simply relabel variables to acquire tax-smoothing interpretations of our plt_tax = plot(title = "Tax collection paths", x_label = "Periods", ylim = [1.4, 2.1]) plot!(plt_tax, 1:N_simul, c_path, label = "incomplete market", lw = 2) -plot!(plt_tax, 1:N_simul, fill(c̄, N_simul), label = "complete market", lw = 2) +plot!(plt_tax, 1:N_simul, fill(c_bar, N_simul), label = "complete market", lw = 2) plot!(plt_tax, 1:N_simul, y_path, label = "govt expenditures", alpha = 0.6, linestyle = :dash, lw = 2) @@ -708,13 +708,13 @@ cp = ConsumptionProblem(beta, y, b0, P) Q = beta * P N_simul = 150 -c̄, b1, b2 = consumption_complete(cp) +c_bar, b1, b2 = consumption_complete(cp) debt_complete = [b1, b2] println("P = $P") println("Q = $Q") println("Govt expenditures in peace and war = $y") -println("Constant tax collections = $c̄") +println("Constant tax collections = $c_bar") println("Govt assets in two states = $debt_complete") msg = """ @@ -731,9 +731,9 @@ println("Spending on Arrow war security in war = $AS2") println("\n") println("Government tax collections plus asset levels in peace and war") -TB1 = c̄ + b1 +TB1 = c_bar + b1 println("T+b in peace = $TB1") -TB2 = c̄ + b2 +TB2 = c_bar + b2 println("T+b in war = $TB2") println("\n") @@ -746,10 +746,10 @@ println("total govt spending in war = $G2") println("\n") println("Let's see ex post and ex ante returns on Arrow securities") -Π = 1 ./ Q # reciprocal(Q) -exret = Π +Pi = 1 ./ Q # reciprocal(Q) +exret = Pi println("Ex post returns to purchase of Arrow securities = $exret") -exant = Π .* P +exant = Pi .* P println("Ex ante returns to purchase of Arrow securities = $exant") ``` @@ -758,7 +758,7 @@ println("Ex ante returns to purchase of Arrow securities = $exant") tags: [remove-cell] --- @testset "Peace and War Debt Tests" begin - #test c̄ ≈ 1.3116883116883118 + #test c_bar ≈ 1.3116883116883118 #test G2 ≈ 2.9350649350649354 #test G1 ≈ 1.3116883116883118 #test AS2 ≈ 0.9350649350649349 diff --git a/lectures/dynamic_programming/wald_friedman.md b/lectures/dynamic_programming/wald_friedman.md index ac9cadf7..3413dd26 100644 --- a/lectures/dynamic_programming/wald_friedman.md +++ b/lectures/dynamic_programming/wald_friedman.md @@ -165,7 +165,7 @@ The bottom panel presents mixtures of these distributions, with various mixing p tags: [hide-output] --- using LinearAlgebra, Statistics -using Distributions, LaTeXStrings, Parameters, Printf, Random, Roots, Plots +using Distributions, LaTeXStrings, Printf, Random, Roots, Plots ``` @@ -426,7 +426,7 @@ function choice(p, d0, d1, L0, L1, c) end ``` -Next we solve a problem by finding the alpha, beta values for the decision rule +Next we solve a problem by finding the $\alpha$, $\beta$ values for the decision rule ```{code-cell} julia function decision_rule(d0, d1, L0, L1, c) @@ -449,8 +449,10 @@ function decision_rule(d0, d1, L0, L1, c) # x₁ to never again accepting x₁ at the various indifference points # Find alpha by checking for a permanent transition from the area accepting of # x₀ to never again accepting x₀ at the various indifference points - beta = findlast((left .== 2) .& (right .≠ 2)) |> (x -> isa(x, Int) ? 
roots[x] : 0) - alpha = findfirst((left .≠ 1) .& (right .== 1)) |> (x -> isa(x, Int) ? roots[x] : 1) + beta = findlast((left .== 2) .& (right .≠ 2)) |> + (x -> isa(x, Int) ? roots[x] : 0) + alpha = findfirst((left .≠ 1) .& (right .== 1)) |> + (x -> isa(x, Int) ? roots[x] : 1) if beta < alpha @printf("Accept x1 if p <= %.2f\nContinue to draw if %.2f <= p <= %.2f \nAccept x0 if p >= %.2f", beta, beta, alpha, alpha) @@ -511,7 +513,7 @@ function Problem(d0 = Beta(1, 1), d1 = Beta(9, 9), L0 = 2, L1 = 2, c = 0.2, p = 0.5, n = 100, return_output = false) - (; d0, d1, L0, L1, c, p, n, return_output) + return (; d0, d1, L0, L1, c, p, n, return_output) end; ``` From cccf20c21a92be72494dd0dd4a457aa9d4bc7377 Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Wed, 30 Aug 2023 13:08:37 -0400 Subject: [PATCH 04/19] the_tuples_where_refferenced_using_named_args --- lectures/dynamic_programming/coleman_policy_iter.md | 4 ++-- lectures/dynamic_programming/mccall_model_with_separation.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index a338c2ea..6f22195e 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -404,7 +404,7 @@ function K!(Kg, g, grid, beta, dudc, f, f_prime, shocks) for (i, y) in enumerate(grid) function h(c) vals = dudc.(g_func.(f(y - c) * shocks)) .* f_prime(y - c) .* shocks - returndudc(c) - beta * mean(vals) + return dudc*c - beta * mean(vals) end Kg[i] = find_zero(h, (1e-10, y - 1e-10)) end @@ -465,7 +465,7 @@ Here's an object containing data from the log-linear growth model we used in the ```{code-cell} julia isoelastic(c, gamma) = isone(gamma) ? log(c) : (c^(1 - gamma) - 1) / (1 - gamma) -function Model(alpha = 0.65, # Productivity parameter +function Model(;alpha = 0.65, # Productivity parameter beta = 0.95, # Discount factor gamma = 1.0, # Risk aversion mu = 0.0, # First parameter in lognorm(mu, sigma) diff --git a/lectures/dynamic_programming/mccall_model_with_separation.md b/lectures/dynamic_programming/mccall_model_with_separation.md index 6b399314..0b169b6d 100644 --- a/lectures/dynamic_programming/mccall_model_with_separation.md +++ b/lectures/dynamic_programming/mccall_model_with_separation.md @@ -288,7 +288,7 @@ We'll use the default parameterizations found in the code above. 
u(c, sigma) = (c^(1 - sigma) - 1) / (1 - sigma) # model constructor -function McCallModel(alpha = 0.2, +function McCallModel(;alpha = 0.2, beta = 0.98, # discount rate gamma = 0.7, c = 6.0, # unemployment compensation From 37fe69593dce84075224484fc2b95fdae50556ec Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Wed, 30 Aug 2023 20:53:59 +0000 Subject: [PATCH 05/19] Apply formatting to Markdown files --- lectures/dynamic_programming/coleman_policy_iter.md | 4 ++-- lectures/dynamic_programming/mccall_model_with_separation.md | 2 +- lectures/dynamic_programming/odu.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index 6f22195e..2d760c55 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -404,7 +404,7 @@ function K!(Kg, g, grid, beta, dudc, f, f_prime, shocks) for (i, y) in enumerate(grid) function h(c) vals = dudc.(g_func.(f(y - c) * shocks)) .* f_prime(y - c) .* shocks - return dudc*c - beta * mean(vals) + return dudc * c - beta * mean(vals) end Kg[i] = find_zero(h, (1e-10, y - 1e-10)) end @@ -465,7 +465,7 @@ Here's an object containing data from the log-linear growth model we used in the ```{code-cell} julia isoelastic(c, gamma) = isone(gamma) ? log(c) : (c^(1 - gamma) - 1) / (1 - gamma) -function Model(;alpha = 0.65, # Productivity parameter +function Model(; alpha = 0.65, # Productivity parameter beta = 0.95, # Discount factor gamma = 1.0, # Risk aversion mu = 0.0, # First parameter in lognorm(mu, sigma) diff --git a/lectures/dynamic_programming/mccall_model_with_separation.md b/lectures/dynamic_programming/mccall_model_with_separation.md index 0b169b6d..92db3999 100644 --- a/lectures/dynamic_programming/mccall_model_with_separation.md +++ b/lectures/dynamic_programming/mccall_model_with_separation.md @@ -288,7 +288,7 @@ We'll use the default parameterizations found in the code above. u(c, sigma) = (c^(1 - sigma) - 1) / (1 - sigma) # model constructor -function McCallModel(;alpha = 0.2, +function McCallModel(; alpha = 0.2, beta = 0.98, # discount rate gamma = 0.7, c = 6.0, # unemployment compensation diff --git a/lectures/dynamic_programming/odu.md b/lectures/dynamic_programming/odu.md index 51d5b400..0abca506 100644 --- a/lectures/dynamic_programming/odu.md +++ b/lectures/dynamic_programming/odu.md @@ -591,7 +591,7 @@ phi_init = ones(sp.n_pi) g(x) = res_wage_operator(sp, x) w_bar_vals = compute_fixed_point(g, phi_init) w_bar = extrapolate(interpolate((sp.pi_grid,), w_bar_vals, - Gridded(Linear())), Flat()) + Gridded(Linear())), Flat()) # Holds the employment state and beliefs of an individual agent. 
mutable struct Agent{TF <: AbstractFloat, TI <: Integer} From c997e23e82697a3e7a5220dfa4929342e5e94b9f Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Wed, 30 Aug 2023 17:48:10 -0400 Subject: [PATCH 06/19] small_mistakes_forget_after_lb --- lectures/dynamic_programming/coleman_policy_iter.md | 2 +- lectures/dynamic_programming/egm_policy_iter.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index 2d760c55..8acd8a98 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -404,7 +404,7 @@ function K!(Kg, g, grid, beta, dudc, f, f_prime, shocks) for (i, y) in enumerate(grid) function h(c) vals = dudc.(g_func.(f(y - c) * shocks)) .* f_prime(y - c) .* shocks - return dudc * c - beta * mean(vals) + return dudc(c) - beta * mean(vals) end Kg[i] = find_zero(h, (1e-10, y - 1e-10)) end diff --git a/lectures/dynamic_programming/egm_policy_iter.md b/lectures/dynamic_programming/egm_policy_iter.md index 6036bf7e..68fb862a 100644 --- a/lectures/dynamic_programming/egm_policy_iter.md +++ b/lectures/dynamic_programming/egm_policy_iter.md @@ -216,7 +216,7 @@ The first step is to bring in the model that we used in the {doc}`Coleman policy ```{code-cell} julia # model -function Model(alpha = 0.65, # productivity parameter +function Model(;alpha = 0.65, # productivity parameter beta = 0.95, # discount factor gamma = 1.0, # risk aversion mu = 0.0, # lognorm(mu, sigma) From bdaa0fd83cbc78129c9c9d29da981cba9b3a5aa5 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Wed, 30 Aug 2023 21:51:04 +0000 Subject: [PATCH 07/19] Apply formatting to Markdown files --- lectures/dynamic_programming/egm_policy_iter.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lectures/dynamic_programming/egm_policy_iter.md b/lectures/dynamic_programming/egm_policy_iter.md index 68fb862a..c5327d6b 100644 --- a/lectures/dynamic_programming/egm_policy_iter.md +++ b/lectures/dynamic_programming/egm_policy_iter.md @@ -216,7 +216,7 @@ The first step is to bring in the model that we used in the {doc}`Coleman policy ```{code-cell} julia # model -function Model(;alpha = 0.65, # productivity parameter +function Model(; alpha = 0.65, # productivity parameter beta = 0.95, # discount factor gamma = 1.0, # risk aversion mu = 0.0, # lognorm(mu, sigma) From 40a29e72e5173527a59e1234822313f82ff2716c Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Fri, 1 Sep 2023 09:11:20 -0400 Subject: [PATCH 08/19] fixes --- lectures/dynamic_programming/coleman_policy_iter.md | 2 +- lectures/dynamic_programming/discrete_dp.md | 2 +- lectures/dynamic_programming/mccall_model.md | 2 +- lectures/dynamic_programming/mccall_model_with_separation.md | 2 +- lectures/dynamic_programming/perm_income_cons.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index 8acd8a98..f12edd18 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -465,7 +465,7 @@ Here's an object containing data from the log-linear growth model we used in the ```{code-cell} julia isoelastic(c, gamma) = isone(gamma) ? 
log(c) : (c^(1 - gamma) - 1) / (1 - gamma) -function Model(; alpha = 0.65, # Productivity parameter +function Model(; alpha = 0.65, # Productivity parameter beta = 0.95, # Discount factor gamma = 1.0, # Risk aversion mu = 0.0, # First parameter in lognorm(mu, sigma) diff --git a/lectures/dynamic_programming/discrete_dp.md b/lectures/dynamic_programming/discrete_dp.md index 5f98c1ee..8dd88263 100644 --- a/lectures/dynamic_programming/discrete_dp.md +++ b/lectures/dynamic_programming/discrete_dp.md @@ -500,7 +500,7 @@ function verbose_matrices(g) @assert sum(Q[s + 1, a + 1, :]) ≈ 1 #Optional check that matrix is stochastic end end - return (;Q, R ) +return (;Q,R) end ``` diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index 5dd618f6..f63e4a77 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm(c = 25.0, beta = 0.99, w) = (; c, beta, w)# named tuples +mcm(; w, c = 25.0, beta = 0.99) = (; w, c, beta)# named tuples compute_reservation_wage(mcm()) # call with default parameters ``` diff --git a/lectures/dynamic_programming/mccall_model_with_separation.md b/lectures/dynamic_programming/mccall_model_with_separation.md index 92db3999..2cebe82d 100644 --- a/lectures/dynamic_programming/mccall_model_with_separation.md +++ b/lectures/dynamic_programming/mccall_model_with_separation.md @@ -293,7 +293,7 @@ function McCallModel(; alpha = 0.2, gamma = 0.7, c = 6.0, # unemployment compensation sigma = 2.0, - u = u, # utility function + u = (c, sigma) -> (c^(1 - sigma) - 1) / (1 - sigma), # utility function w = range(10, 20, length = 60), # wage values dist = BetaBinomial(59, 600, 400)) # distribution over wage values return (; alpha, beta, gamma, c, sigma, u, w, dist) diff --git a/lectures/dynamic_programming/perm_income_cons.md b/lectures/dynamic_programming/perm_income_cons.md index 64d05460..6bad519e 100644 --- a/lectures/dynamic_programming/perm_income_cons.md +++ b/lectures/dynamic_programming/perm_income_cons.md @@ -534,7 +534,7 @@ In the code below, we use the [LSS](https://github.com/QuantEcon/QuantEcon.jl/bl ```{code-cell} julia function income_consumption_debt_series(A, C, G, mu_0, Sigma_0, T = 150, npaths = 25) - lss = LSS(A, C < G; mu_0, Sigma_0) + lss = LSS(A, C, G; mu_0, Sigma_0) # simulation/Moment Parameters moment_generator = moment_sequence(lss) From dbc5c76f58a94b004b530fde8942e9e49ef195f8 Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Fri, 1 Sep 2023 17:17:06 -0400 Subject: [PATCH 09/19] mistakes_in_local_build --- lectures/dynamic_programming/mccall_model.md | 2 +- lectures/dynamic_programming/optgrowth.md | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index f63e4a77..f90c3c31 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm(; w, c = 25.0, beta = 0.99) = (; w, c, beta)# named tuples +mcm(; c = 25.0, beta = 0.99,w) = (; c, beta,w=w)# named tuples 
compute_reservation_wage(mcm()) # call with default parameters ``` diff --git a/lectures/dynamic_programming/optgrowth.md b/lectures/dynamic_programming/optgrowth.md index f1297b75..ddaab5e1 100644 --- a/lectures/dynamic_programming/optgrowth.md +++ b/lectures/dynamic_programming/optgrowth.md @@ -568,12 +568,11 @@ In addition to the model parameters, we need a grid and some shock draws for Mon Random.seed!(42) # for reproducible results u(c; p) = log(c) # utility f(k; p) = k^p.alpha # deterministic part of production function -function # named tuples defaults -OptimalGrowthModel(alpha = 0.4, beta = 0.96, mu = 0.0, s = 0.1, - u = u, f = f, # defaults defined above - y = range(1e-5, 4.0, length = 200), # grid on y - Xi = exp.(mu .+ s * randn(250))) - (; alpha, beta, mu, s, u, f, y, Xi) +function OptimalGrowthModel(; alpha = 0.4, beta = 0.96, mu = 0.0, s = 0.1, + u = u, f = f, # defaults defined above + y = range(1e-5, 4.0, length = 200), # grid on y + Xi = exp.(mu .+ s * randn(250))) + return (; alpha, beta, mu, s, u, f, y, Xi) end # named tuples defaults # True value and policy function From 8cb431e138f5c497123510a1c8c8c2d7c30952ea Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Fri, 1 Sep 2023 21:20:09 +0000 Subject: [PATCH 10/19] Apply formatting to Markdown files --- lectures/dynamic_programming/mccall_model.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index f90c3c31..ff983a5c 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm(; c = 25.0, beta = 0.99,w) = (; c, beta,w=w)# named tuples +mcm(; c = 25.0, beta = 0.99, w) = (; c, beta, w = w)# named tuples compute_reservation_wage(mcm()) # call with default parameters ``` From 30942ae978f4ed04adeaa69785d0e3e4e30151dd Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Fri, 1 Sep 2023 18:04:07 -0400 Subject: [PATCH 11/19] formatter_did'nt_pick_up_Xi --- lectures/dynamic_programming/optgrowth.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lectures/dynamic_programming/optgrowth.md b/lectures/dynamic_programming/optgrowth.md index ddaab5e1..1afcbc65 100644 --- a/lectures/dynamic_programming/optgrowth.md +++ b/lectures/dynamic_programming/optgrowth.md @@ -748,7 +748,7 @@ Random.seed!(42); tags: [hide-output] --- p = OptimalGrowthModel(s = 0.05) -p.ξ +p.Xi ``` Otherwise, the parameters and primitives are the same as the log linear model discussed earlier in the lecture. 
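The commits above repeatedly apply the same refactor: a `Model = @with_kw (...)` named-tuple constructor is replaced by a plain keyword-argument function that returns a `NamedTuple`, which call sites then unpack with `(; ...) = p`. A minimal sketch of that pattern is below; it is an illustrative example only, not part of the patch, and `ExampleModel` with its defaults is a hypothetical stand-in for the models edited here.

```julia
# Illustrative only: keyword constructor returning a NamedTuple,
# the pattern this PR uses in place of `Model = @with_kw (...)`.
function ExampleModel(; alpha = 0.4, beta = 0.96,
                      y = range(1e-5, 4.0, length = 200))
    return (; alpha, beta, y)
end

p = ExampleModel(beta = 0.9)  # override any default by keyword
(; alpha, beta) = p           # destructure only the fields you need
alpha * beta                  # unpacked fields behave like ordinary locals
```

Compared with `@with_kw`, this keeps keyword defaults and access by field name while dropping the `Parameters.jl` dependency that the same commits remove from the `using` lines.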
From ccf998c48e036838e314f76daa877c565dd16245 Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Sat, 2 Sep 2023 12:33:49 -0400 Subject: [PATCH 12/19] mcm_fix --- lectures/dynamic_programming/mccall_model.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index ff983a5c..964e4bd3 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm(; c = 25.0, beta = 0.99, w) = (; c, beta, w = w)# named tuples +mcm(; c = 25.0, beta = 0.99, w=w) = (; c, beta, w )# named tuples compute_reservation_wage(mcm()) # call with default parameters ``` From aeb503d0e0f2bd9b32c4d19c0da7526e5b3da5ce Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Sat, 2 Sep 2023 16:36:46 +0000 Subject: [PATCH 13/19] Apply formatting to Markdown files --- lectures/dynamic_programming/mccall_model.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index 964e4bd3..c0294b34 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm(; c = 25.0, beta = 0.99, w=w) = (; c, beta, w )# named tuples +mcm(; c = 25.0, beta = 0.99, w = w) = (; c, beta, w)# named tuples compute_reservation_wage(mcm()) # call with default parameters ``` From a082cba1a1475c2b98d2db871321a6677f77f32c Mon Sep 17 00:00:00 2001 From: george-mcdonald <97567639+Jonah-Heyl@users.noreply.github.com> Date: Sat, 2 Sep 2023 12:57:06 -0400 Subject: [PATCH 14/19] 43_fix --- lectures/dynamic_programming/discrete_dp.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lectures/dynamic_programming/discrete_dp.md b/lectures/dynamic_programming/discrete_dp.md index 8dd88263..45d88b36 100644 --- a/lectures/dynamic_programming/discrete_dp.md +++ b/lectures/dynamic_programming/discrete_dp.md @@ -432,7 +432,7 @@ using BenchmarkTools, Plots, QuantEcon ``` ```{code-cell} julia -SimpleOG(B = 10, M = 5, alpha = 0.5, beta = 0.9) = (; B, M, alpha, beta) +SimpleOG(;B = 10, M = 5, alpha = 0.5, beta = 0.9) = (; B, M, alpha, beta) function transition_matrices(g) (; B, M, alpha, beta) = g From 5da3f6a042d243abcc2eb8dc41f7024ca5aa69f8 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Sat, 2 Sep 2023 17:00:20 +0000 Subject: [PATCH 15/19] Apply formatting to Markdown files --- lectures/dynamic_programming/discrete_dp.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lectures/dynamic_programming/discrete_dp.md b/lectures/dynamic_programming/discrete_dp.md index 45d88b36..0e0c5857 100644 --- a/lectures/dynamic_programming/discrete_dp.md +++ b/lectures/dynamic_programming/discrete_dp.md @@ -432,7 +432,7 @@ using BenchmarkTools, Plots, QuantEcon ``` ```{code-cell} julia -SimpleOG(;B = 10, M = 5, alpha = 0.5, beta = 0.9) = (; B, M, alpha, beta) +SimpleOG(; B = 10, M = 5, alpha = 0.5, beta = 0.9) = (; B, M, alpha, beta) function transition_matrices(g) (; B, M, alpha, beta) = g From b6bee2cbe7186ff21d68304241fde97c64958f6b Mon Sep 17 
00:00:00 2001 From: Jesse Perla Date: Wed, 6 Sep 2023 10:04:33 -0700 Subject: [PATCH 16/19] Made changes based on previous comments --- format_all_directory.sh | 19 +++++++++++++++++++ lectures/dynamic_programming/career.md | 4 +--- .../coleman_policy_iter.md | 4 +--- lectures/dynamic_programming/ifp.md | 3 +-- lectures/dynamic_programming/jv.md | 4 +--- lectures/dynamic_programming/lqcontrol.md | 2 +- lectures/dynamic_programming/mccall_model.md | 2 +- .../mccall_model_with_separation.md | 10 +++------- lectures/dynamic_programming/smoothing.md | 4 ++-- 9 files changed, 30 insertions(+), 22 deletions(-) create mode 100644 format_all_directory.sh diff --git a/format_all_directory.sh b/format_all_directory.sh new file mode 100644 index 00000000..68d434bc --- /dev/null +++ b/format_all_directory.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Check if at least one argument is given +if [ "$#" -lt 1 ]; then + echo "Usage: $0 path/to/md/files/ [use_replacements]" + exit 1 +fi + +# Directory containing markdown files +dir_path=$1 + +# Optional use_replacements flag, default to "false" if not provided +use_replacements=${2:-false} + +# Loop over all .md files in the given directory +for file_path in "$dir_path"*.md; do + # Call the Julia script with the current .md file and the use_replacements flag + julia format_myst.jl "$file_path" $use_replacements +done diff --git a/lectures/dynamic_programming/career.md b/lectures/dynamic_programming/career.md index 4c9daab6..3427a329 100644 --- a/lectures/dynamic_programming/career.md +++ b/lectures/dynamic_programming/career.md @@ -196,9 +196,7 @@ function CareerWorkerProblem(; beta = 0.95, G_probs = pdf.(dist_G, support(dist_G)) F_mean = sum(theta .* F_probs) G_mean = sum(epsilon .* G_probs) - return (; beta, N, B, theta, epsilon, - F_probs, G_probs, - F_mean, G_mean) + return (; beta, N, B, theta, epsilon, F_probs, G_probs, F_mean, G_mean) end function update_bellman!(cp, v, out; ret_policy = false) diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index f12edd18..68ac68b2 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -478,9 +478,7 @@ function Model(; alpha = 0.65, # Productivity parameter dudc = c -> c^(-gamma), # u_prime f = k -> k^alpha, # production function f_prime = k -> alpha * k^(alpha - 1)) - return (; alpha, beta, gamma, mu, s, grid, grid_min, grid_max, grid_size, u, - dudc, f, - f_prime) + return (; alpha, beta, gamma, mu, s, grid, grid_min, grid_max, grid_size, u, dudc, f, f_prime) end ``` diff --git a/lectures/dynamic_programming/ifp.md b/lectures/dynamic_programming/ifp.md index e8b24307..5b38c984 100644 --- a/lectures/dynamic_programming/ifp.md +++ b/lectures/dynamic_programming/ifp.md @@ -397,8 +397,7 @@ function ConsumerProblem(; r = 0.01, R = 1 + r asset_grid = range(-b, grid_max, length = grid_size) - return (; r, R, beta, b, Pi, z_vals, - asset_grid) + return (; r, R, beta, b, Pi, z_vals, asset_grid) end function T!(cp, V, out; ret_policy = false) diff --git a/lectures/dynamic_programming/jv.md b/lectures/dynamic_programming/jv.md index d4ce301d..f103f6f5 100644 --- a/lectures/dynamic_programming/jv.md +++ b/lectures/dynamic_programming/jv.md @@ -210,9 +210,7 @@ function JvWorker(; A = 1.4, pi_func, F, E, epsilon) end -function T!(jv, - V, - new_V::AbstractVector) +function T!(jv, V, new_V::AbstractVector) # simplify notation (; G, pi_func, F, beta, E, epsilon) = jv diff --git 
a/lectures/dynamic_programming/lqcontrol.md b/lectures/dynamic_programming/lqcontrol.md index 27c6c161..2e0e97a8 100644 --- a/lectures/dynamic_programming/lqcontrol.md +++ b/lectures/dynamic_programming/lqcontrol.md @@ -545,7 +545,7 @@ to solve finite and infinite horizon linear quadratic control problems. In the module, the various updating, simulation and fixed point methods act on a type called `LQ`, which includes * Instance data: - * The required parameters $Q, R, A, B$ and optional parameters C, $\beta$, T, R_f, N specifying a given LQ model + * The required parameters $Q, R, A, B$ and optional parameters $C, \beta, T, R_f, N$ specifying a given LQ model * set $T$ and $R_f$ to `None` in the infinite horizon case * set `C = None` (or zero) in the deterministic case * the value function and policy data diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index c0294b34..921ec7ff 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm(; c = 25.0, beta = 0.99, w = w) = (; c, beta, w)# named tuples +mcm(; c = 25.0, beta = 0.99, w = range(10.0, 60.0, length = n+1)) = (; c, beta, w) compute_reservation_wage(mcm()) # call with default parameters ``` diff --git a/lectures/dynamic_programming/mccall_model_with_separation.md b/lectures/dynamic_programming/mccall_model_with_separation.md index 2cebe82d..357e3438 100644 --- a/lectures/dynamic_programming/mccall_model_with_separation.md +++ b/lectures/dynamic_programming/mccall_model_with_separation.md @@ -244,8 +244,8 @@ function solve_mccall_model(mcm; U_iv = 1.0, V_iv = ones(length(mcm.w)), tol = 1 @assert minimum(w) > 0.0 # perhaps not strictly necessary, but useful here # necessary objects - u_w = u.(w, sigma) - u_c = u(c, sigma) + u_w = mcm.u.(w, sigma) + u_c = mcm.u(c, sigma) E = expectation(dist) # expectation operator for wage distribution # Bellman operator T. Fixed point is x* s.t. T(x*) = x* @@ -284,16 +284,12 @@ Let's plot the approximate solutions $U$ and $V$ to see what they look like. We'll use the default parameterizations found in the code above. 
```{code-cell} julia -# a default utility function -u(c, sigma) = (c^(1 - sigma) - 1) / (1 - sigma) - -# model constructor function McCallModel(; alpha = 0.2, beta = 0.98, # discount rate gamma = 0.7, c = 6.0, # unemployment compensation sigma = 2.0, - u = (c, sigma) -> (c^(1 - sigma) - 1) / (1 - sigma), # utility function + u = (c, sigma) -> (c^(1 - sigma) - 1) / (1 - sigma), w = range(10, 20, length = 60), # wage values dist = BetaBinomial(59, 600, 400)) # distribution over wage values return (; alpha, beta, gamma, c, sigma, u, w, dist) diff --git a/lectures/dynamic_programming/smoothing.md b/lectures/dynamic_programming/smoothing.md index a6b006dd..4fe147b0 100644 --- a/lectures/dynamic_programming/smoothing.md +++ b/lectures/dynamic_programming/smoothing.md @@ -366,7 +366,7 @@ function ConsumptionProblem(beta = 0.96, b0 = 3.0, P = [0.8 0.2 0.4 0.6]) - (; beta, y, b0, P) + return (; beta, y, b0, P) end function consumption_complete(cp) @@ -379,7 +379,7 @@ function consumption_complete(cp) # Using equation (7) calculate b2 b2 = (y2 - y1 - (Q[1, 1] - Q[2, 1] - 1) * b1) / (Q[1, 2] + 1 - Q[2, 2]) - # Using equation (5) calculae c_bar + # Using equation (5) calculate c_bar c_bar = y1 - b0 + ([b1 b2] * Q[1, :])[1] return c_bar, b1, b2 From 9cfa98f3f77d34e16c039b00d8b085ee22d84017 Mon Sep 17 00:00:00 2001 From: Jesse Perla Date: Wed, 6 Sep 2023 10:15:10 -0700 Subject: [PATCH 17/19] Ran formatter --- README.md | 9 +++++++++ format_all_directory.sh | 13 ++++++++----- lectures/dynamic_programming/coleman_policy_iter.md | 3 ++- lectures/dynamic_programming/mccall_model.md | 2 +- 4 files changed, 20 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 5f171da8..fe71ad3d 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,15 @@ Julia code blocks in the myst `.md` files can be formatted using a script in thi julia format_myst.jl lectures/getting_started_julia/getting_started.md ``` +As a helper, you can call a shell script to do it for an entire folder +```bash +bash format_all_directory.sh lectures/dynamic_programming +``` +or to also do the unicode substitutions +```bash +bash format_all_directory.sh lectures/dynamic_programming true +``` + Alternatively, the formatter will run automatically when a pull-request is made diff --git a/format_all_directory.sh b/format_all_directory.sh index 68d434bc..46ab99c7 100644 --- a/format_all_directory.sh +++ b/format_all_directory.sh @@ -6,14 +6,17 @@ if [ "$#" -lt 1 ]; then exit 1 fi -# Directory containing markdown files -dir_path=$1 +# Directory containing markdown files, ensuring it has a trailing slash +dir_path="${1%/}/" # Optional use_replacements flag, default to "false" if not provided use_replacements=${2:-false} # Loop over all .md files in the given directory -for file_path in "$dir_path"*.md; do - # Call the Julia script with the current .md file and the use_replacements flag - julia format_myst.jl "$file_path" $use_replacements +for file_path in ${dir_path}*.md; do + # Only process regular files + if [ -f "$file_path" ]; then + # Call the Julia script with the current .md file and the use_replacements flag + julia format_myst.jl "$file_path" $use_replacements + fi done diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index 68ac68b2..2139ff21 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -478,7 +478,8 @@ function Model(; alpha = 0.65, # Productivity parameter dudc = c -> c^(-gamma), 
# u_prime f = k -> k^alpha, # production function f_prime = k -> alpha * k^(alpha - 1)) - return (; alpha, beta, gamma, mu, s, grid, grid_min, grid_max, grid_size, u, dudc, f, f_prime) + return (; alpha, beta, gamma, mu, s, grid, grid_min, grid_max, grid_size, u, + dudc, f, f_prime) end ``` diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index 921ec7ff..dbc038ab 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -427,7 +427,7 @@ This coding pattern, where `expression || error("failure)` first checks the expr Let's compute the reservation wage at the default parameters ```{code-cell} julia -mcm(; c = 25.0, beta = 0.99, w = range(10.0, 60.0, length = n+1)) = (; c, beta, w) +mcm(; c = 25.0, beta = 0.99, w = range(10.0, 60.0, length = n + 1)) = (; c, beta, w) compute_reservation_wage(mcm()) # call with default parameters ``` From f083a65b875ea0eecf8a66d5613008c237843800 Mon Sep 17 00:00:00 2001 From: Jesse Perla Date: Wed, 6 Sep 2023 17:12:37 -0700 Subject: [PATCH 18/19] Try to drop the scrollbars --- lectures/dynamic_programming/mccall_model.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index dbc038ab..e9f2a3cd 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -350,7 +350,8 @@ beta = 0.99 num_plots = 6 # Operator -T(v) = max.(w / (1 - beta), c + beta * E * v) # (5) broadcasts over the w, fixes the v +# Broadcasts over the w, fixes the v +T(v) = max.(w / (1 - beta), c + beta * E * v) # alternatively, T(v) = [max(wval/(1 - beta), c + beta * E*v) for wval in w] # fill in matrix of vs From 4c9862c3bbfa20bd477aef212cb1cc87cba6512e Mon Sep 17 00:00:00 2001 From: Jesse Perla Date: Wed, 6 Sep 2023 17:13:08 -0700 Subject: [PATCH 19/19] Turn off formatter for now --- .github/workflows/format.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index cf1b8994..df510876 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -3,7 +3,7 @@ name: Format Markdown Files on: pull_request: branches: - - '*' + - 'skip-for-now' # - '*' jobs: format: