@@ -3,8 +3,10 @@ jupytext:
   text_representation:
     extension: .md
     format_name: myst
+    format_version: 0.13
+    jupytext_version: 1.14.5
 kernelspec:
-  display_name: Python 3
+  display_name: Python 3 (ipykernel)
   language: python
   name: python3
 ---
@@ -29,10 +31,9 @@ kernelspec:
 
 In addition to what's in Anaconda, this lecture will need the following libraries:
 
-```{code-cell} ipython
----
-tags: [hide-output]
----
+```{code-cell} ipython3
+:tags: [hide-output]
+
 !pip install --upgrade quantecon
 ```
 
@@ -79,7 +80,7 @@ In reading this lecture, please don't think that our decision-maker is paranoid
 
 Let's start with some imports:
 
-```{code-cell} ipython
+```{code-cell} ipython3
 import pandas as pd
 import numpy as np
 from scipy.linalg import eig
@@ -941,7 +942,7 @@ We compute value-entropy correspondences for two policies
 
 The code for producing the graph shown above, with blue being for the robust policy, is as follows
 
-```{code-cell} python3
+```{code-cell} ipython3
 # Model parameters
 
 a_0 = 100
@@ -987,7 +988,7 @@ def evaluate_policy(θ, F):
     as well as the entropy level.
     """
 
-    rlq = qe.robustlq.RBLQ(Q, R, A, B, C, β, θ)
+    rlq = qe.RBLQ(Q, R, A, B, C, β, θ)
     K_F, P_F, d_F, O_F, o_F = rlq.evaluate_F(F)
     x0 = np.array([[1.], [0.], [0.]])
     value = - x0.T @ P_F @ x0 - d_F
@@ -1044,11 +1045,11 @@ def value_and_entropy(emax, F, bw, grid_size=1000):
 
 
 # Compute the optimal rule
-optimal_lq = qe.lqcontrol.LQ(Q, R, A, B, C, beta=β)
+optimal_lq = qe.LQ(Q, R, A, B, C, beta=β)
 Po, Fo, do = optimal_lq.stationary_values()
 
 # Compute a robust rule given θ
-baseline_robust = qe.robustlq.RBLQ(Q, R, A, B, C, β, θ)
+baseline_robust = qe.RBLQ(Q, R, A, B, C, β, θ)
 Fb, Kb, Pb = baseline_robust.robust_rule()
 
 # Check the positive definiteness of worst-case covariance matrix to
@@ -1189,4 +1190,3 @@ latter is just $\hat P$.
 ```{hint}
 Use the fact that $\hat P = \mathcal B( \mathcal D( \hat P))$
 ```
-
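
The substantive API change in this commit is that `LQ` and `RBLQ` are now referenced through `quantecon`'s top-level namespace (`qe.LQ`, `qe.RBLQ`) rather than through the `lqcontrol` and `robustlq` submodules. A minimal sketch of the pattern the updated cells rely on is below; the matrices and parameter values are illustrative placeholders, not the lecture's own:

```python
import numpy as np
import quantecon as qe

# Illustrative 1x1 problem (placeholder values, not from the lecture)
A = np.array([[1.0]])    # state transition
B = np.array([[1.0]])    # control loading
C = np.array([[0.1]])    # shock loading
Q = np.array([[1.0]])    # control cost
R = np.array([[1.0]])    # state cost
β, θ = 0.95, 50.0        # discount factor, robustness penalty

# Same constructor calls as in the updated cells, via the top-level namespace
lq = qe.LQ(Q, R, A, B, C, beta=β)
P, F, d = lq.stationary_values()

rlq = qe.RBLQ(Q, R, A, B, C, β, θ)
F_r, K_r, P_r = rlq.robust_rule()

# For a large robustness penalty θ, the robust rule should sit
# close to the standard LQ rule
print(np.max(np.abs(F - F_r)))
```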