@@ -3,8 +3,10 @@ jupytext:
33 text_representation :
44 extension : .md
55 format_name : myst
6+ format_version : 0.13
7+ jupytext_version : 1.16.4
68kernelspec :
7- display_name : Python 3
9+ display_name : Python 3 (ipykernel)
810 language : python
911 name : python3
1012---
@@ -79,15 +81,14 @@ adjust investors' subjective beliefs about mean returns in order to render more
7981
8082Let's start with some imports:
8183
82- ``` {code-cell} ipython
84+ ``` {code-cell} ipython3
8385import numpy as np
8486import scipy.stats as stat
8587import matplotlib.pyplot as plt
88+ from numba import jit
8689from ipywidgets import interact, FloatSlider
8790```
8891
89-
90-
9192## Mean-Variance Portfolio Choice
9293
9394A risk-free security earns one-period net return $r_f$.
@@ -169,7 +170,7 @@ $w$'s with **extreme long and short positions**.
169170A common reaction to these outcomes is that they are so implausible that a portfolio
170171manager cannot recommend them to a customer.
171172
172- ``` {code-cell} python3
173+ ``` {code-cell} ipython3
173174np.random.seed(12)
174175
175176N = 10 # Number of assets
@@ -300,7 +301,7 @@ The starting point of the Black-Litterman portfolio choice model is thus
300301a pair $(\delta_m, \mu_m)$ that tells the customer to hold the
301302market portfolio.
302303
303- ``` {code-cell} python3
304+ ``` {code-cell} ipython3
304305# Observed mean excess market return
305306r_m = w_m @ μ_est
306307
@@ -316,11 +317,12 @@ d_m = r_m / σ_m
316317# Derive "view" which would induce the market portfolio
317318μ_m = (d_m * Σ_est @ w_m).reshape(N, 1)
318319
320+ x = np.arange(N) + 1
319321fig, ax = plt.subplots(figsize=(8, 5))
320322ax.set_title(r'Difference between $\hat{\mu}$ (estimate) and $\mu_{BL}$ (market implied)')
321- ax.plot(np.arange(N)+1 , μ_est, 'o', c='k', label='$\hat{\mu}$')
322- ax.plot(np.arange(N)+1 , μ_m, 'o', c='r', label='$\mu_{BL}$')
323- ax.vlines(np.arange(N) + 1 , μ_m, μ_est, lw=1)
323+ ax.plot(x , μ_est, 'o', c='k', label='$\hat{\mu}$')
324+ ax.plot(x , μ_m, 'o', c='r', label='$\mu_{BL}$')
325+ ax.vlines(x , μ_m, μ_est, lw=1)
324326ax.axhline(0, c='k', ls='--')
325327ax.set_xlabel('Assets')
326328ax.xaxis.set_ticks(np.arange(1, N+1, 1))
@@ -384,7 +386,7 @@ If $\hat \mu$ is the maximum likelihood estimator
384386and $\tau$ is chosen heavily to weight this view, then the
385387customer's portfolio will involve big short-long positions.
386388
387- ``` {code-cell} python3
389+ ``` {code-cell} ipython3
388390def black_litterman(λ, μ1, μ2, Σ1, Σ2):
389391 """
390392 This function calculates the Black-Litterman mixture
@@ -607,7 +609,7 @@ $\bar d_2$ on the RHS of the constraint, by varying
607609$\bar d_2$ (or $\lambda$), we can trace out the whole curve
608610as the figure below illustrates.
609611
610- ``` {code-cell} python3
612+ ``` {code-cell} ipython3
611613np.random.seed(1987102)
612614
613615N = 2 # Number of assets
@@ -649,13 +651,9 @@ def decolletage(λ):
649651 dist_r_hat = stat.multivariate_normal(μ_est.squeeze(), τ * Σ_est)
650652
651653 X, Y = np.meshgrid(r1, r2)
652- Z_BL = np.zeros((N_r1, N_r2))
653- Z_hat = np.zeros((N_r1, N_r2))
654-
655- for i in range(N_r1):
656- for j in range(N_r2):
657- Z_BL[i, j] = dist_r_BL.pdf(np.hstack([X[i, j], Y[i, j]]))
658- Z_hat[i, j] = dist_r_hat.pdf(np.hstack([X[i, j], Y[i, j]]))
654+ XY = np.stack((X, Y), axis=-1)
655+ Z_BL = dist_r_BL.pdf(XY)
656+ Z_hat = dist_r_hat.pdf(XY)
659657
660658 μ_tilde = black_litterman(λ, μ_m, μ_est, Σ_est, τ * Σ_est).flatten()
661659
@@ -692,7 +690,7 @@ This leads to the
692690following figure, on which the curve connecting $\hat \mu$
693691and $\mu_{BL}$ is bending
694692
695- ``` {code-cell} python3
693+ ``` {code-cell} ipython3
696694λ_grid = np.linspace(.001, 20000, 1000)
697695curve = np.asarray([black_litterman(λ, μ_m, μ_est, Σ_est,
698696 τ * np.eye(N)).flatten() for λ in λ_grid])
@@ -705,13 +703,9 @@ def decolletage(λ):
705703 dist_r_hat = stat.multivariate_normal(μ_est.squeeze(), τ * np.eye(N))
706704
707705 X, Y = np.meshgrid(r1, r2)
708- Z_BL = np.zeros((N_r1, N_r2))
709- Z_hat = np.zeros((N_r1, N_r2))
710-
711- for i in range(N_r1):
712- for j in range(N_r2):
713- Z_BL[i, j] = dist_r_BL.pdf(np.hstack([X[i, j], Y[i, j]]))
714- Z_hat[i, j] = dist_r_hat.pdf(np.hstack([X[i, j], Y[i, j]]))
706+ XY = np.stack((X, Y), axis=-1)
707+ Z_BL = dist_r_BL.pdf(XY)
708+ Z_hat = dist_r_hat.pdf(XY)
715709
716710 μ_tilde = black_litterman(λ, μ_m, μ_est, Σ_est, τ * np.eye(N)).flatten()
717711
@@ -1247,7 +1241,7 @@ observations is related to the sampling frequency
12471241
12481242- Moreover, for a fixed lag length, $n$, the dependence vanishes as the sampling frequency goes to infinity. In fact, letting $h$ go to $\infty$ gives back the case of IID data.
12491243
1250- ``` {code-cell} python3
1244+ ``` {code-cell} ipython3
12511245μ = .0
12521246κ = .1
12531247σ = .5
@@ -1346,7 +1340,8 @@ thus getting an idea about how the asymptotic relative MSEs changes in
13461340the sampling frequency $h$ relative to the IID case that we
13471341compute in closed form.
13481342
1349- ``` {code-cell} python3
1343+ ``` {code-cell} ipython3
1344+ @jit
13501345def sample_generator(h, N, M):
13511346 ϕ = (1 - np.exp(-κ * h)) * μ
13521347 ρ = np.exp(-κ * h)
@@ -1355,18 +1350,18 @@ def sample_generator(h, N, M):
13551350 mean_uncond = μ
13561351 std_uncond = np.sqrt(σ**2 / (2 * κ))
13571352
1358- ε_path = stat.norm (0, np.sqrt(s)).rvs( (M, N))
1353+ ε_path = np.random.normal (0, np.sqrt(s), (M, N))
13591354
13601355 y_path = np.zeros((M, N + 1))
1361- y_path[:, 0] = stat.norm (mean_uncond, std_uncond).rvs( M)
1356+ y_path[:, 0] = np.random.normal (mean_uncond, std_uncond, M)
13621357
13631358 for i in range(N):
13641359 y_path[:, i + 1] = ϕ + ρ * y_path[:, i] + ε_path[:, i]
13651360
13661361 return y_path
13671362```
13681363
1369- ``` {code-cell} python3
1364+ ``` {code-cell} ipython3
13701365# Generate large sample for different frequencies
13711366N_app, M_app = 1000, 30000 # Sample size, number of simulations
13721367h_grid = np.linspace(.1, 80, 30)
0 commit comments