This R session will illustrate the incorporation of prior information in the estimation of \(\boldsymbol{\mu}\) and \(\boldsymbol{\Sigma}\).

(Useful R links: Cookbook R, Quick-R, R documentation, CRAN, METACRAN.)

Shrinkage

Suppose we have an \(N\)-dimensional i.i.d. Gaussian time series \[ \mathbf{x}_t \sim \mathcal{N}(\boldsymbol{\mu}, \boldsymbol{\Sigma}). \] The sample estimators for the mean and covariance matrix are, respectively, the sample mean \[ \hat{\boldsymbol{\mu}} = \frac{1}{T}\sum_{t=1}^T \mathbf{x}_t \] and the sample covariance matrix \[ \hat{\boldsymbol{\Sigma}} = \frac{1}{T-1}\sum_{t=1}^T (\mathbf{x}_t - \hat{\boldsymbol{\mu}})(\mathbf{x}_t - \hat{\boldsymbol{\mu}})^T. \] We will next explore the idea of shrinkage to improve these sample estimators when \(T\) is not large enough.

Shrinkage in \(\boldsymbol{\mu}\)

The James-Stein estimator is \[ \hat{\boldsymbol{\mu}}^\textsf{JS} = (1-\rho) \hat{\boldsymbol{\mu}} + \rho \mathbf{t} \] where \(\mathbf{t}\) is the shrinkage target and \(0\le\rho\le1\) is the amount of shrinkage. Common choices for the target \(\mathbf{t}\) are:

  • \(\mathbf{t} = \mathbf{0}\)
  • \(\mathbf{t} = 0.1 \times \mathbf{1}\)
  • \(\mathbf{t} = \frac{\mathbf{1}^T \hat{\boldsymbol{\mu}}}{N} \times \mathbf{1}\) (grand mean target)
  • \(\mathbf{t} = \frac{\mathbf{1}^T\hat{\boldsymbol{\Sigma}}^{-1}\hat{\boldsymbol{\mu}}}{\mathbf{1}^T\hat{\boldsymbol{\Sigma}}^{-1}\mathbf{1}} \times \mathbf{1}\) (volatility-weighted mean)

A choice of \(\rho\) that reduces the estimation error is \[ \rho = \frac{1}{T}\frac{N\bar{\lambda}-2\lambda_\textsf{max}}{\|\hat{\boldsymbol{\mu}} - \mathbf{t}\|^2} \] where \(\bar{\lambda}\) and \(\lambda_\textsf{max}\) are the average and maximum values, respectively, of the eigenvalues of \(\boldsymbol{\Sigma}\) (or \(\hat{\boldsymbol{\Sigma}}\)).
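
Since we will use this estimator with several targets, it can be convenient to wrap all the steps in a small helper function (just a sketch; james_stein() is a name introduced here for convenience, and the code below spells out the same steps inline):

# sketch of a helper computing the James-Stein estimator for a given target
james_stein <- function(mu_sm, t, Sigma_scm, T) {
  lambdas <- eigen(Sigma_scm)$values
  rho <- (1/T) * (length(mu_sm)*mean(lambdas) - 2*max(lambdas)) / norm(mu_sm - t, "2")^2
  return((1 - rho)*mu_sm + rho*t)
}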

We will first explore this estimator using synthetic data. Let’s start by generating the data:

# generate Gaussian synthetic return data
library(mvtnorm)
set.seed(357)
N <- 100
T <- 120
mu <- runif(N)
U <- t(rmvnorm(n = round(0.7*N), sigma = 0.1*diag(N)))  # N x 0.7N factor loading matrix
Sigma <- U %*% t(U) + diag(N)  # low-rank plus identity covariance matrix
X <- rmvnorm(n = T, mean = mu, sigma = Sigma)
str(X)
#>  num [1:120, 1:100] 3.57 0.19 -0.807 5.042 0.281 ...

# sample estimates (sample mean and sample covariance matrix)
mu_sm <- colMeans(X)
Sigma_scm <- cov(X)

Now let’s try the James-Stein estimator with the four targets:

# define the four targets
t_1 <- rep(0, N)            # zero target
t_2 <- rep(0.1, N)          # constant 0.1 target
t_3 <- rep(mean(mu_sm), N)  # grand mean (GM) target
t_4 <- rep(sum(solve(Sigma_scm, mu_sm))/sum(solve(Sigma_scm, rep(1, N))), N)  # volatility-weighted (VW) mean target

# compute the corresponding four rho's
lambdas  <- eigen(Sigma_scm)$values
lmd_mean <- mean(lambdas)
lmd_max  <- max(lambdas)
rho_1 <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t_1, "2")^2
rho_2 <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t_2, "2")^2
rho_3 <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t_3, "2")^2
rho_4 <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t_4, "2")^2

# finally the James-Stein estimators
mu_JS_1 <- (1-rho_1)*mu_sm + rho_1*t_1
mu_JS_2 <- (1-rho_2)*mu_sm + rho_2*t_2
mu_JS_3 <- (1-rho_3)*mu_sm + rho_3*t_3
mu_JS_4 <- (1-rho_4)*mu_sm + rho_4*t_4

Let’s now compare the estimation error measured in terms of squared error and PRIAL (percentage relative improvement in average loss): \[ \begin{align} \textsf{SE} &= \|\hat{\boldsymbol{\mu}}^\textsf{JS} - \boldsymbol{\mu}\|^2\\ \textsf{PRIAL} &= \frac{\|\hat{\boldsymbol{\mu}} - \boldsymbol{\mu}\|^2 - \|\hat{\boldsymbol{\mu}}^\textsf{JS} - \boldsymbol{\mu}\|^2}{\|\hat{\boldsymbol{\mu}} - \boldsymbol{\mu}\|^2} \end{align} \]
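
These two metrics are computed inline in what follows; equivalently, they could be wrapped in a small helper (a sketch; se_prial() is a name introduced here for illustration):

# sketch: squared error and PRIAL of an estimate vs. the plain sample estimator
se_prial <- function(est, sample_est, truth, type = "2") {
  SE     <- norm(est - truth, type)^2
  SE_ref <- norm(sample_est - truth, type)^2
  return(c(SE = SE, PRIAL = (SE_ref - SE)/SE_ref))
}
# e.g.: se_prial(mu_JS_1, mu_sm, mu) or, for covariance matrices, type = "F"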

# compute squared error and PRIAL
SE_mu <- c(norm(mu_sm   - mu, "2")^2,
           norm(mu_JS_1 - mu, "2")^2,
           norm(mu_JS_2 - mu, "2")^2,
           norm(mu_JS_3 - mu, "2")^2,
           norm(mu_JS_4 - mu, "2")^2)
names(SE_mu) <- c("sample mean", 
                  "JS with 0 target", 
                  "JS with 0.1 target", 
                  "JS with GM target",
                  "JS with VW target")
print(SE_mu)
#>        sample mean   JS with 0 target JS with 0.1 target  JS with GM target  JS with VW target 
#>           5.971340           5.594483           5.274438           3.318186           3.150180
ref <- norm(mu_sm - mu, "2")^2
PRIAL_mu <- (ref - SE_mu)/ref
print(PRIAL_mu)
#>        sample mean   JS with 0 target JS with 0.1 target  JS with GM target  JS with VW target 
#>         0.00000000         0.06311096         0.11670777         0.44431464         0.47245011

#plots
barplot(SE_mu, main = "Squared error in estimation of mu", 
        col = heat.colors(5), cex.names = 0.75, las = 1)

barplot(PRIAL_mu, main = "PRIAL in estimation of mu", 
        col = heat.colors(5), cex.names = 0.75, las = 1)

The improvement in performance is quite clear.

We can now plot the error as a function of the number of samples to get a more complete picture:

# first generate all the data
set.seed(357)
N <- 100
T_max <- 300
mu <- runif(N)
U <- t(rmvnorm(n = round(0.7*N), sigma = 0.1*diag(N)))
Sigma <- U %*% t(U) + diag(N)
X <- rmvnorm(n = T_max, mean = mu, sigma = Sigma)

# now loop over subsets of the samples
SE_mu_vs_T <- NULL
T_sweep <- ceiling(seq(1.01*N, T_max, length.out = 20))
for (T in T_sweep) {
  X_ <- X[1:T, ]
  # sample estimates
  mu_sm <- colMeans(X_)
  Sigma_scm <- cov(X_)
  # James-Stein estimators
  t_3 <- rep(mean(mu_sm), N)
  t_4 <- rep(sum(solve(Sigma_scm, mu_sm))/sum(solve(Sigma_scm, rep(1, N))), N)
  lambdas  <- eigen(Sigma_scm)$values
  lmd_mean <- mean(lambdas)
  lmd_max  <- max(lambdas)
  rho_1 <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t_1, "2")^2
  rho_2 <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t_2, "2")^2
  rho_3 <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t_3, "2")^2
  rho_4 <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t_4, "2")^2
  mu_JS_1 <- (1-rho_1)*mu_sm + rho_1*t_1
  mu_JS_2 <- (1-rho_2)*mu_sm + rho_2*t_2
  mu_JS_3 <- (1-rho_3)*mu_sm + rho_3*t_3
  mu_JS_4 <- (1-rho_4)*mu_sm + rho_4*t_4
  # compute errors
  SE_mu_vs_T <- rbind(SE_mu_vs_T, c(norm(mu_sm   - mu, "2")^2,
                                    norm(mu_JS_1 - mu, "2")^2,
                                    norm(mu_JS_2 - mu, "2")^2,
                                    norm(mu_JS_3 - mu, "2")^2,
                                    norm(mu_JS_4 - mu, "2")^2))
}
colnames(SE_mu_vs_T) <- names(SE_mu)
rownames(SE_mu_vs_T) <- paste("T =", T_sweep)
# compute PRIAL
PRIAL_mu_vs_T <- (SE_mu_vs_T[, 1] - SE_mu_vs_T[, -1])/SE_mu_vs_T[, 1]

# plots
matplot(T_sweep, SE_mu_vs_T, 
        main = "Squared error in estimation of mu", xlab = "T", ylab = "squared error",
        type = "b", pch = 20, col = rainbow(5))
legend("topright", inset=0.01, legend = colnames(SE_mu_vs_T), pch = 20, col = rainbow(5))

matplot(T_sweep, PRIAL_mu_vs_T, 
        main = "PRIAL in estimation of mu", xlab = "T", ylab = "PRIAL",
        type = "b", pch = 20, col = rainbow(4))
legend("topright", inset=0.01, legend = colnames(PRIAL_mu_vs_T), pch = 20, col = rainbow(4))

Indeed, the improvement from using shrinkage in the estimation of the mean is clear. In particular, the James-Stein estimators using the grand mean and the volatility-weighted mean as targets are the best by a wide margin.

Shrinkage in \(\boldsymbol{\Sigma}\)

The shrinkage estimator for the covariance matrix is \[ \hat{\boldsymbol{\Sigma}}^\textsf{sh} = (1-\rho) \hat{\boldsymbol{\Sigma}} + \rho \mathbf{T} \] where \(\mathbf{T}\) is the shrinkage target and \(0\le\rho\le1\) is the amount of shrinkage.

Common choices for the target \(\mathbf{T}\) are:

  • \(\mathbf{T} = \frac{1}{N}\textsf{Tr}\left(\hat{\boldsymbol{\Sigma}}\right) \times \mathbf{I}\) (scaled identity)
  • \(\mathbf{T} = \textsf{Diag}(\hat{\boldsymbol{\Sigma}})\) (diagonal)

To determine \(\rho\) we will use three different criteria via random matrix theory (RMT):

  • MSE of covariance matrix
  • quadratic loss of precision matrix
  • Sharpe ratio.

Let’s start by generating some synthetic data:

# generate Gaussian synthetic return data
library(mvtnorm)
set.seed(357)
N <- 100
T <- 120
mu <- runif(N)
U <- t(rmvnorm(n = round(0.7*N), sigma = 0.1*diag(N)))
Sigma <- U %*% t(U) + diag(N)
X <- rmvnorm(n = T, mean = mu, sigma = Sigma)

# sample estimates (sample mean and sample covariance matrix)
mu_sm <- colMeans(X)
Sigma_scm <- cov(X)

The two targets are easily computed:

# two target matrices T=Identity and T=Diagonal
T_I <- sum(diag(Sigma_scm))/N * diag(N)
T_D <- diag(diag(Sigma_scm))

We next compute the Ledoit-Wolf estimator: \[ \rho =\min\left(1,\frac{\frac{1}{T^{2}}\sum_{t=1}^{T}\|\hat{\boldsymbol{\Sigma}}-\mathbf{x}_{t}\mathbf{x}_{t}^{T}\|_{F}^{2}}{\|\hat{\boldsymbol{\Sigma}}-\mathbf{T}\|_{F}^{2}}\right) \] where the \(\mathbf{x}_t\) here denote the centered returns.

# Ledoit-Wolf shrinkage estimators with two targets 
rho_LW <- function(X, Sigma_scm, Sigma_T) {
  X <- scale(X, center = TRUE, scale = FALSE)  # center the returns
  T <- nrow(X)
  # direct computation of the numerator sum would be:
  #   tmp <- 0
  #   for (t in 1:T)
  #     tmp <- tmp + norm(Sigma_scm - X[t, ] %o% X[t, ], "F")^2
  # fast vectorized computation replacing the loop:
  tmp <- (1-T)*norm(Sigma_scm, "F")^2 + sum(apply(X, 1, norm, "2")^4)
  rho <- min(1, (1/T^2) * tmp / norm(Sigma_scm - Sigma_T, "F")^2)
  return(rho)
}
rho <- rho_LW(X, Sigma_scm, T_I)
Sigma_LW_I <- (1-rho)*Sigma_scm + rho*T_I
rho <- rho_LW(X, Sigma_scm, T_D)
Sigma_LW_D <- (1-rho)*Sigma_scm + rho*T_D

Let’s now compare the estimation error measured in terms of squared error and PRIAL: \[ \begin{align} \textsf{SE} &= \|\hat{\boldsymbol{\Sigma}}^\textsf{sh} - \boldsymbol{\Sigma}\|_F^2\\ \textsf{PRIAL} &= \frac{\|\hat{\boldsymbol{\Sigma}} - \boldsymbol{\Sigma}\|_F^2 - \|\hat{\boldsymbol{\Sigma}}^\textsf{sh} - \boldsymbol{\Sigma}\|_F^2}{\|\hat{\boldsymbol{\Sigma}} - \boldsymbol{\Sigma}\|_F^2} \end{align} \]

# performance in terms of squared error and PRIAL
SE_Sigma <- c(norm(Sigma_scm  - Sigma, "F")^2,
              norm(Sigma_LW_I - Sigma, "F")^2,
              norm(Sigma_LW_D - Sigma, "F")^2)
names(SE_Sigma) <- c("SCM", 
                     "LW with I target", 
                     "LW with D target")
print(SE_Sigma)
#>              SCM LW with I target LW with D target 
#>         5012.484         3031.465         3073.355
ref <- norm(Sigma_scm - Sigma, "F")^2
PRIAL_Sigma <- (ref - SE_Sigma)/ref
print(PRIAL_Sigma)
#>              SCM LW with I target LW with D target 
#>        0.0000000        0.3952170        0.3868599

#plots
barplot(SE_Sigma, main = "Squared error in estimation of Sigma", 
        col = heat.colors(5), cex.names = 0.75, las = 1)

barplot(PRIAL_Sigma, main = "PRIAL in estimation of Sigma", 
        col = heat.colors(5), cex.names = 0.75, las = 1)

Let’s try now the shrinkage estimator based on maximizing the Sharpe ratio: \[ \begin{array}{ll} \underset{\rho_{1}\geq0}{\textsf{maximize}} & \begin{array}{c} \frac{\hat{\boldsymbol{\mu}}^{T}(\hat{\boldsymbol{\Sigma}}^{\textsf{sh}})^{-1}\hat{\boldsymbol{\mu}}-\delta}{\sqrt{b\hat{\boldsymbol{\mu}}^{T}(\hat{\boldsymbol{\Sigma}}^{\textsf{sh}})^{-1}\hat{\boldsymbol{\Sigma}}(\hat{\boldsymbol{\Sigma}}^{\textsf{sh}})^{-1}\hat{\boldsymbol{\mu}}}}\end{array}\\ \textsf{subject to} & \begin{array}[t]{l} \hat{\boldsymbol{\Sigma}}^{\textsf{sh}}=\rho_{1}\mathbf{T}+\hat{\boldsymbol{\Sigma}}\\ \delta=D/(1-D)\\ D=\frac{1}{T}\textsf{Tr}(\hat{\boldsymbol{\Sigma}}(\hat{\boldsymbol{\Sigma}}^{\textsf{sh}})^{-1})\\ b=\frac{T}{\textsf{Tr}(\mathbf{W}(\mathbf{I}+\delta\mathbf{W})^{-2})} \end{array} \end{array} \] where \(\mathbf{W}=\mathbf{I}-\frac{1}{T}\mathbf{1}\mathbf{1}^{T}\).

# Max-SR shrinkage estimators with two targets 
rho_maxSR <- function(mu_sm, Sigma_scm, Sigma_T, T) {
  W <- diag(T) - (1/T)*matrix(1, T, T)  # T x T centering matrix
  rho1_sweep <- exp(seq(-10, 10, length.out = 100))  # log-spaced grid for rho1
  obj <- NULL
  for (rho1 in rho1_sweep) {
    Sigma_sh <- rho1*Sigma_T + Sigma_scm
    D <- (1/T) * sum(diag(Sigma_scm %*% solve(Sigma_sh)))
    delta <- D/(1-D)
    b <- T/sum(diag(W %*% solve((diag(T)+delta*W) %*% (diag(T)+delta*W))))
    inv_S_sh_mu <- solve(Sigma_sh, mu_sm)
    num <- mu_sm %*% inv_S_sh_mu - delta
    den <- sqrt(b * inv_S_sh_mu %*% Sigma_scm %*% inv_S_sh_mu)
    obj <- c(obj, num/den)  # estimated Sharpe ratio for this rho1
  }
  #plot(obj)
  i_max <- which.max(obj)
  rho1 <- rho1_sweep[i_max]
  return(rho1/(1 + rho1))  # map rho1*T + Sigma to the convex-combination form
}
rho <- rho_maxSR(mu_sm, Sigma_scm, T_I, T)
Sigma_maxSR_I <- (1-rho)*Sigma_scm + rho*T_I
rho <- rho_maxSR(mu_sm, Sigma_scm, T_D, T)
Sigma_maxSR_D <- (1-rho)*Sigma_scm + rho*T_D

We can now evaluate the performance:

# performance in terms of squared error, SR, and PRIAL
SE_Sigma <- c(norm(Sigma_scm     - Sigma, "F")^2,
              norm(Sigma_maxSR_I - Sigma, "F")^2,
              norm(Sigma_maxSR_D - Sigma, "F")^2)
names(SE_Sigma) <- c("SCM", 
                     "Max-SR with I target", 
                     "Max-SR with D target")
print(SE_Sigma)
#>                  SCM Max-SR with I target Max-SR with D target 
#>             5012.484             3812.347             3834.279
ref <- norm(Sigma_scm - Sigma, "F")^2
PRIAL_Sigma <- (ref - SE_Sigma)/ref
print(PRIAL_Sigma)
#>                  SCM Max-SR with I target Max-SR with D target 
#>            0.0000000            0.2394296            0.2350540

compute_SR <- function(Sigma_scm, mu_sm, mu, Sigma) {
  inv_S_sh_mu <- solve(Sigma_scm, mu_sm)  # portfolio direction Sigma_hat^-1 mu_hat
  return(mu %*% inv_S_sh_mu / sqrt(inv_S_sh_mu %*% Sigma %*% inv_S_sh_mu))  # SR evaluated with the true mu and Sigma
}
SR_Sigma <- c(compute_SR(Sigma_scm, mu_sm, mu, Sigma),
              compute_SR(Sigma_maxSR_I, mu_sm, mu, Sigma),
              compute_SR(Sigma_maxSR_D, mu_sm, mu, Sigma))
names(SR_Sigma) <- names(SE_Sigma)
print(SR_Sigma)
#>                  SCM Max-SR with I target Max-SR with D target 
#>             1.472879             2.403143             2.372630

#plots
barplot(SE_Sigma, main = "Squared error in estimation of Sigma", 
        col = heat.colors(5), cex.names = 0.75, las = 1)

barplot(PRIAL_Sigma, main = "PRIAL in estimation of Sigma", 
        col = heat.colors(5), cex.names = 0.75, las = 1)

barplot(SR_Sigma, main = "Sharpe ratio", 
        col = heat.colors(5), cex.names = 0.75, las = 1)

We can now plot the performance measures as a function of the number of samples to get a more complete picture (we will use only the identity target):

# first generate all the data
set.seed(357)
N <- 100
T_max <- 300
mu <- runif(N)
U <- t(rmvnorm(n = round(0.7*N), sigma = 0.1*diag(N)))
Sigma <- U %*% t(U) + diag(N)
X <- rmvnorm(n = T_max, mean = mu, sigma = Sigma)

# now loop over subsets of the samples
SR_Sigma_vs_T <- SE_Sigma_vs_T <- NULL
T_sweep <- ceiling(seq(1.01*N, T_max, length.out = 20))
for (T in T_sweep) {
  X_ <- X[1:T, ]
  # sample estimates
  mu_sm <- colMeans(X_)
  Sigma_scm <- cov(X_)
  # methods: Ledoit-Wolf, max-SR
  T_I <- sum(diag(Sigma_scm))/N * diag(N)
  rho <- rho_LW(X_, Sigma_scm, T_I)
  Sigma_LW <- (1-rho)*Sigma_scm + rho*T_I
  rho <- rho_maxSR(mu_sm, Sigma_scm, T_I, T)
  Sigma_maxSR <- (1-rho)*Sigma_scm + rho*T_I
  # compute performance
  SE_Sigma_vs_T <- rbind(SE_Sigma_vs_T, c(norm(Sigma_scm - Sigma, "F")^2,
                                          norm(Sigma_LW - Sigma, "F")^2,
                                          norm(Sigma_maxSR - Sigma, "F")^2))
  SR_Sigma_vs_T <- rbind(SR_Sigma_vs_T, c(compute_SR(Sigma_scm, mu_sm, mu, Sigma),
                                          compute_SR(Sigma_LW, mu_sm, mu, Sigma),
                                          compute_SR(Sigma_maxSR, mu_sm, mu, Sigma)))
}
colnames(SR_Sigma_vs_T) <- colnames(SE_Sigma_vs_T) <- c("SCM", "LW", "max-SR")
rownames(SR_Sigma_vs_T) <- rownames(SE_Sigma_vs_T) <- paste("T =", T_sweep)
# compute PRIAL
PRIAL_Sigma_vs_T <- (SE_Sigma_vs_T[, 1] - SE_Sigma_vs_T[, -1])/SE_Sigma_vs_T[, 1]

# plots
matplot(T_sweep, SE_Sigma_vs_T, 
        main = "Squared error in estimation of Sigma", xlab = "T", ylab = "squared error",
        type = "b", pch = 20, col = rainbow(3))
legend("topright", inset=0.01, legend = colnames(SE_Sigma_vs_T), pch = 20, col = rainbow(3))

matplot(T_sweep, PRIAL_Sigma_vs_T, 
        main = "PRIAL in estimation of Sigma", xlab = "T", ylab = "PRIAL",
        type = "b", pch = 20, col = rainbow(2))
legend("topright", inset=0.01, legend = colnames(PRIAL_Sigma_vs_T), pch = 20, col = rainbow(2))

matplot(T_sweep, SR_Sigma_vs_T, 
        main = "Sharpe ratio", xlab = "T", ylab = "SR",
        type = "b", pch = 20, col = rainbow(3))
legend("bottomright", inset=0.01, legend = colnames(SR_Sigma_vs_T), pch = 20, col = rainbow(3))

Shrinkage in \(\boldsymbol{\mu}\) and \(\boldsymbol{\Sigma}\) with market data

We start by loading some stock market data and dividing it into a training set and test set:

library(xts)
library(quantmod)
library(PerformanceAnalytics)

# set begin-end date and stock namelist
begin_date <- "2015-01-01"
end_date <- "2017-08-31"

# load list of symbols (old version here: http://trading.chrisconlan.com/SPstocks_current.csv)
stock_namelist <- as.character(read.csv("SPstocks_current.csv", 
                                        stringsAsFactors = FALSE, header = FALSE)[, 1])
# download data from Yahoo Finance
N <- length(stock_namelist)
N <- 300  # keep only the first 300 symbols
prices <- xts()
for (stock_index in 1:N)
  prices <- cbind(prices, Ad(getSymbols(stock_namelist[stock_index], 
                                        from = begin_date, to = end_date, auto.assign = FALSE)))
colnames(prices) <- stock_namelist[1:N]
tclass(prices) <- "Date"

# compute log-returns and linear returns
X_log <- diff(log(prices))[-1]
X_lin <- (prices/lag(prices) - 1)[-1]
N <- ncol(X_log)  # number of stocks
T <- nrow(X_log)  # number of days

# split data into training and test data
T_trn <- round(0.7*T)  # 70% of data
X_log_trn <- X_log[1:T_trn, ]
X_log_tst <- X_log[(T_trn+1):T, ]
X_lin_trn <- X_lin[1:T_trn, ]
X_lin_tst <- X_lin[(T_trn+1):T, ]

Let’s consider three estimators for \(\boldsymbol{\Sigma}\): the SCM, the shrinkage Ledoit-Wolf estimator, and the shrinkage max-SR estimator:

# estimate Sigma with the training data (SCM, Ledoit-Wolf, and max-SR)
mu_sm <- colMeans(X_log_trn)
Sigma_scm <- cov(X_log_trn)
T_I <- sum(diag(Sigma_scm))/N * diag(N)
rho <- rho_LW(X_log_trn, Sigma_scm, T_I)
Sigma_LW <- (1-rho)*Sigma_scm + rho*T_I
rho <- rho_maxSR(mu_sm, Sigma_scm, T_I, T)
Sigma_maxSR <- (1-rho)*Sigma_scm + rho*T_I

GMVP to compare shrinkage estimators for \(\boldsymbol{\Sigma}\)

We start with the Global Minimum Variance Portfolio (GMVP) since it does not require \(\boldsymbol{\mu}\) and we can focus on the estimation of \(\boldsymbol{\Sigma}\): \[ \mathbf{w}_\textsf{GMVP} = \frac{1}{\mathbf{1}^T\boldsymbol{\Sigma}^{-1}\mathbf{1}}\boldsymbol{\Sigma}^{-1}\mathbf{1} \]

# create function for GMVP
portfolioGMVP <- function(Sigma) {
  ones <- rep(1, nrow(Sigma))
  Sigma_inv_1 <- solve(Sigma, ones)  # same as: solve(Sigma) %*% ones
  w <- (1/as.numeric(ones %*% Sigma_inv_1)) * Sigma_inv_1
  return(w)
}

# compute the three versions of GMVP
w_GMVP_scm   <- portfolioGMVP(Sigma_scm)
w_GMVP_LW    <- portfolioGMVP(Sigma_LW)
w_GMVP_maxSR <- portfolioGMVP(Sigma_maxSR)
w_GMVP_all <- cbind(w_GMVP_scm, w_GMVP_LW, w_GMVP_maxSR)

# compute returns of the three portfolios
ret_GMVP_all <- xts(X_lin %*% w_GMVP_all, index(X_lin))
ret_GMVP_all_trn <- ret_GMVP_all[1:T_trn, ]
ret_GMVP_all_tst <- ret_GMVP_all[-c(1:T_trn), ]

# performance
table.AnnualizedReturns(ret_GMVP_all_trn)
#>                           w_GMVP_scm w_GMVP_LW w_GMVP_maxSR
#> Annualized Return            -0.0735   -0.0426       0.0867
#> Annualized Std Dev            0.0472    0.0513       0.1247
#> Annualized Sharpe (Rf=0%)    -1.5553   -0.8320       0.6951
table.AnnualizedReturns(ret_GMVP_all_tst)
#>                           w_GMVP_scm w_GMVP_LW w_GMVP_maxSR
#> Annualized Return             0.1950    0.1575       0.1381
#> Annualized Std Dev            0.1398    0.0992       0.0683
#> Annualized Sharpe (Rf=0%)     1.3947    1.5879       2.0209

We can see that in-sample the GMVP based on the SCM obviously performs best in terms of standard deviation, since it minimizes the in-sample variance by construction, while the others are similar or worse. Out-of-sample, however, the comparison is reversed and the shrinkage methods are better.

Let’s plot the wealth evolution over time:

# plots
{ chart.CumReturns(ret_GMVP_all, main = "Performance of different GMVPs", 
                   wealth.index = TRUE, legend.loc = "topleft", colorset = rich8equal)
  addEventLines(xts("training", index(X_lin[T_trn])), srt=90, pos=2, lwd = 2, col = "darkblue") }

chart.CumReturns(ret_GMVP_all_tst, main = "Out-of-sample performance of different GMVPs", 
                 wealth.index = TRUE, legend.loc = "topleft", colorset = rich8equal)

chart.Drawdown(ret_GMVP_all_tst, main = "Out-of-sample drawdown of different GMVPs", 
               legend.loc = "bottomleft", colorset = rich8equal)

One can see the clear superiority of the portfolio based on the shrinkage estimate whose \(\rho\) is computed via RMT to maximize the out-of-sample Sharpe ratio.

Max-Sharpe ratio portfolio to compare shrinkage estimators for \(\boldsymbol{\mu}\) and \(\boldsymbol{\Sigma}\)

Finally, let’s repeat a similar experiment, but this time designing a maximum Sharpe ratio portfolio: \[ \begin{array}{ll} \underset{\tilde{\mathbf{w}}}{\textsf{minimize}} & \tilde{\mathbf{w}}^T\boldsymbol{\Sigma}\tilde{\mathbf{w}}\\ \textsf{subject to} & \tilde{\mathbf{w}}^T\boldsymbol{\mu} = 1\\ & \tilde{\mathbf{w}}\ge\mathbf{0} \end{array} \] and then \(\mathbf{w} = \tilde{\mathbf{w}}/(\mathbf{1}^T\tilde{\mathbf{w}})\). We will also consider the James-Stein shrinkage estimator for \(\boldsymbol{\mu}\):

# James-Stein estimator with GM target
t <- rep(mean(mu_sm), N)
lambdas  <- eigen(Sigma_scm)$values
lmd_mean <- mean(lambdas)
lmd_max  <- max(lambdas)
rho <- (1/T)*(N*lmd_mean - 2*lmd_max)/norm(mu_sm - t, "2")^2
mu_JS <- (1-rho)*mu_sm + rho*t

# now re-compute the max-SR estimator for Sigma combined with the JS estimator for mu:
T_I <- sum(diag(Sigma_scm))/N * diag(N)
rho <- rho_maxSR(mu_JS, Sigma_scm, T_I, T)
Sigma_JS_maxSR <- (1-rho)*Sigma_scm + rho*T_I
library(CVXR)
# create function for the max-SR design
portfolioMaxSharpeRatio <- function(mu, Sigma) {
  w_ <- Variable(nrow(Sigma))
  prob <- Problem(Minimize(quad_form(w_, Sigma)),
                  constraints = list(w_ >= 0, t(mu) %*% w_ == 1))
  result <- solve(prob)
  return(as.vector(result$getValue(w_)/sum(result$getValue(w_))))  # normalize so the weights sum to one
}

# compute seven versions of max-SR portfolio
w_maxSR_scm         <- portfolioMaxSharpeRatio(mu_sm, Sigma_scm)
w_maxSR_LW          <- portfolioMaxSharpeRatio(mu_sm, Sigma_LW)
w_maxSR_maxSR       <- portfolioMaxSharpeRatio(mu_sm, Sigma_maxSR)
w_maxSR_JS_scm      <- portfolioMaxSharpeRatio(mu_JS, Sigma_scm)
w_maxSR_JS_LW       <- portfolioMaxSharpeRatio(mu_JS, Sigma_LW)
w_maxSR_JS_maxSR    <- portfolioMaxSharpeRatio(mu_JS, Sigma_maxSR)
w_JS_maxSR_JS_maxSR <- portfolioMaxSharpeRatio(mu_JS, Sigma_JS_maxSR)
w_maxSR_all <- cbind(w_maxSR_scm, w_maxSR_LW, w_maxSR_maxSR,
                     w_maxSR_JS_scm, w_maxSR_JS_LW, w_maxSR_JS_maxSR, w_JS_maxSR_JS_maxSR)

# compute returns of the seven portfolios
ret_maxSR_all <- xts(X_lin %*% w_maxSR_all, index(X_lin))
ret_maxSR_all_trn <- ret_maxSR_all[1:T_trn, ]
ret_maxSR_all_tst <- ret_maxSR_all[-c(1:T_trn), ]

# performance
table.AnnualizedReturns(ret_maxSR_all_trn)
#>                           w_maxSR_scm w_maxSR_LW w_maxSR_maxSR w_maxSR_JS_scm w_maxSR_JS_LW w_maxSR_JS_maxSR
#> Annualized Return              0.5970     0.5970        0.3536         0.5609        0.5621           0.3252
#> Annualized Std Dev             0.1817     0.1815        0.1494         0.1727        0.1728           0.1454
#> Annualized Sharpe (Rf=0%)      3.2857     3.2893        2.3674         3.2474        3.2522           2.2362
#>                           w_JS_maxSR_JS_maxSR
#> Annualized Return                      0.2150
#> Annualized Std Dev                     0.1486
#> Annualized Sharpe (Rf=0%)              1.4469
table.AnnualizedReturns(ret_maxSR_all_tst)
#>                           w_maxSR_scm w_maxSR_LW w_maxSR_maxSR w_maxSR_JS_scm w_maxSR_JS_LW w_maxSR_JS_maxSR
#> Annualized Return              0.6228     0.6248        0.3410         0.5734        0.5765           0.3159
#> Annualized Std Dev             0.1861     0.1856        0.0943         0.1691        0.1691           0.0882
#> Annualized Sharpe (Rf=0%)      3.3466     3.3665        3.6174         3.3906        3.4091           3.5797
#>                           w_JS_maxSR_JS_maxSR
#> Annualized Return                      0.2525
#> Annualized Std Dev                     0.0798
#> Annualized Sharpe (Rf=0%)              3.1651

Again, we can see totally different results in-sample and out-of-sample. In-sample, the portfolio based on the SCM has top performance, but this vanishes out-of-sample, where the portfolios based on the max-SR criterion shine in terms of Sharpe ratio (as expected!).

Let’s plot the wealth evolution over time:

# plots
{ chart.CumReturns(ret_maxSR_all, main = "Performance of different max-SR portfolios", 
                 wealth.index = TRUE, legend.loc = "topleft", colorset = rich8equal)
  addEventLines(xts("training", index(X_lin[T_trn])), srt=90, pos=2, lwd = 2, col = "darkblue") }

chart.CumReturns(ret_maxSR_all_tst, main = "Out-of-sample performance of different max-SR portfolios", 
                 wealth.index = TRUE, legend.loc = "topleft", colorset = rich8equal)

chart.Drawdown(ret_maxSR_all_tst, main = "Out-of-sample drawdown of different max-SR portfolios", 
               legend.loc = "bottomleft", colorset = rich8equal)

The top performers by a wide margin are the ones in which \(\rho\) is obtained via the max-SR criterion. And among those three, the best is the one that fully uses the James-Stein estimator. Now let’s look at both the GMVP and max-SR portfolios together:

{ chart.CumReturns(cbind(ret_GMVP_all, ret_maxSR_all), 
                 main = "Performance of the GMVP and max-SR portfolios", 
                 wealth.index = TRUE, legend.loc = "topleft", colorset = rich10equal)
  addEventLines(xts("training", index(X_lin[T_trn])), srt=90, pos=2, lwd = 2, col = "darkblue") }

table.AnnualizedReturns(cbind(ret_GMVP_all_tst, ret_maxSR_all_tst))
#>                           w_GMVP_scm w_GMVP_LW w_GMVP_maxSR w_maxSR_scm w_maxSR_LW w_maxSR_maxSR
#> Annualized Return             0.1950    0.1575       0.1381      0.6228     0.6248        0.3410
#> Annualized Std Dev            0.1398    0.0992       0.0683      0.1861     0.1856        0.0943
#> Annualized Sharpe (Rf=0%)     1.3947    1.5879       2.0209      3.3466     3.3665        3.6174
#>                           w_maxSR_JS_scm w_maxSR_JS_LW w_maxSR_JS_maxSR w_JS_maxSR_JS_maxSR
#> Annualized Return                 0.5734        0.5765           0.3159              0.2525
#> Annualized Std Dev                0.1691        0.1691           0.0882              0.0798
#> Annualized Sharpe (Rf=0%)         3.3906        3.4091           3.5797              3.1651

Indeed, using shrinkage clearly improves the performance, and the maximum Sharpe ratio design improves over the GMVP, which ignores \(\boldsymbol{\mu}\).

Black-Litterman model

The Black-Litterman model was created by Fischer Black and Robert Litterman in 1992 to resolve shortcomings of the traditional Markowitz mean-variance asset allocation model. It allows the investor to incorporate discretionary views on the expected returns into the estimation based on historical data.

The log-returns are assumed normally distributed: \[ \mathbf{x}\sim\mathcal{N}(\boldsymbol{\mu}, \boldsymbol{\Sigma}), \] with the expected returns modeled as a normally distributed random variable \[ \boldsymbol{\mu}\sim\mathcal{N}(\boldsymbol{\pi}, \tau\boldsymbol{\Sigma}). \] We will consider two options for the market equilibrium \(\boldsymbol{\pi}\):

  • the sample mean: \(\boldsymbol{\pi} = \frac{1}{T}\sum_{t=1}^T \mathbf{x}_t\)
  • based on CAPM: \(\boldsymbol{\pi} = \delta \boldsymbol{\Sigma} \mathbf{w}_M\), where \(\delta = (E[R_{M,t}]-r_f)/\textsf{var}[R_{M,t}]\) is the risk aversion, \(\boldsymbol{\Sigma}\) is the covariance matrix of the returns, and \(\mathbf{w}_M\) is the normalized market capitalization.

The Black-Litterman model also introduces a mechanism to incorporate the investor’s views into the input assumptions. The views are modeled as \[ \mathbf{P}\boldsymbol{\mu}\sim\mathcal{N}(\mathbf{v}, \boldsymbol{\Omega}). \] Employing Bayes’ formula, one can finally find the posterior for the returns: \[ \mathbf{x} | \mathbf{v}, \boldsymbol{\Omega} \sim\mathcal{N}(\boldsymbol{\mu}_\textsf{BL}, \boldsymbol{\Sigma}_\textsf{BL}) \] where \[ \begin{align} \boldsymbol{\mu}_\textsf{BL} &= \boldsymbol{\pi} + \tau\boldsymbol{\Sigma}\mathbf{P}^T(\tau\mathbf{P}\boldsymbol{\Sigma}\mathbf{P}^T + \boldsymbol{\Omega})^{-1}(\mathbf{v} - \mathbf{P}\boldsymbol{\pi})\\ \boldsymbol{\Sigma}_\textsf{BL} &= (1+\tau)\boldsymbol{\Sigma} - \tau^2\boldsymbol{\Sigma}\mathbf{P}^T(\tau\mathbf{P}\boldsymbol{\Sigma}\mathbf{P}^T + \boldsymbol{\Omega})^{-1}\mathbf{P}\boldsymbol{\Sigma} \end{align} \]
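
To make the views model concrete: each row of \(\mathbf{P}\) selects a linear combination of expected returns on which a view \(\mathbf{v}\) is expressed, with confidence encoded in \(\boldsymbol{\Omega}\). As a hypothetical illustration (not the views used later in this session), an absolute view on asset 1 and a relative view that asset 2 outperforms asset 3, on a universe of \(N=4\) assets, could be written as:

# hypothetical encoding of two views on N = 4 assets
P <- rbind(c(1, 0,  0, 0),    # absolute view: E[return of asset 1] = 1%
           c(0, 1, -1, 0))    # relative view: asset 2 outperforms asset 3 by 2%
v <- c(0.01, 0.02)
Omega <- diag(c(1e-5, 1e-4))  # larger diagonal entries mean less confidence in that view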

Let’s start by loading the training and test sets:

library(xts)
library(quantmod)

# set begin-end date and stock namelist
begin_date <- "2010-01-01"
end_date <- "2015-12-31"
stock_namelist <- c("AAPL","AMD","ADI",  "AEZS","A",  "APD","AA","CF")
sector_namelist <- c(rep("Information Technology", 3), rep("Health Care", 2), rep("Materials", 3))
market_capitalization <- 1e6 * c(851.726/5,13.256,33.87, 60.935,23.719, 36.863,9.634,9.9)

# prepare stock data
data_set <- xts()
for (stock_index in 1:length(stock_namelist))
  data_set <- cbind(data_set, 
                    Ad(getSymbols(stock_namelist[stock_index], 
                                  from = begin_date, to = end_date, auto.assign = FALSE)))
colnames(data_set) <- stock_namelist
tclass(data_set) <- "Date"
X <- diff(log(data_set), na.pad = FALSE)
N <- ncol(X)
T <- nrow(X)

# split data into training and test data
T_trn <- round(0.7*T)
X_trn <- X[1:T_trn, ]
X_tst <- X[(T_trn+1):T, ]

Now let’s estimate the different expected returns \(\boldsymbol{\pi}\) with the training data:

# sample covariance matrix
Sigma <- cov(X_trn)

# sample mean
mu_sm <- colMeans(X_trn)

# mean estimation via CAPM
w_mkt <- market_capitalization/sum(market_capitalization)
names(w_mkt) <- colnames(X)
pie(w_mkt, 
    main = "Market capitalization weights",
    col = c("purple", "violetred1", "green3", "cornsilk", "cyan", "orange", "deepskyblue", "yellow"))

x_mkt_trn <- X_trn %*% w_mkt  # returns of the market portfolio
delta <- as.numeric(mean(x_mkt_trn)/var(x_mkt_trn))  # risk aversion (taking rf = 0)
mu_capm <- as.vector(delta * Sigma %*% w_mkt)
names(mu_capm) <- colnames(X)

Now we are ready to use Black-Litterman to compute the posterior mean and covariance of the returns:

# Black-Litterman
compute_BL <- function(pi, c_sweep) {
  tau <- 1/T_trn
  # each row of P picks one asset: absolute views on CF, AMD, and A (in that row order)
  P <- rbind(c(0,0,0, 0,0, 0,0,1),
             c(0,1,0, 0,0, 0,0,0),
             c(0,0,0, 0,1, 0,0,0))
  v <- c(-0.00027, -0.00054, 0.00016)  # view values for P %*% mu
  mu_BL <- NULL
  Sigma_BL <- list()
  for (c in c_sweep + 1e-6) {  # small offset avoids division by zero at c = 0
    Omega <- (1/c) * P %*% Sigma %*% t(P)  # larger c means more confident views
    mu_ <- pi + tau * Sigma %*% t(P) %*% solve(tau * P %*% Sigma %*% t(P) + Omega) %*% (v - P %*% pi)
    Sigma_ <- (1+tau)*Sigma - 
      (tau^2) * Sigma %*% t(P) %*% solve(tau * P %*% Sigma %*% t(P) + Omega) %*% P %*% Sigma
    mu_BL <- cbind(mu_BL, mu_)
    Sigma_BL <- c(Sigma_BL, list(Sigma_))
  }
  colnames(mu_BL) <- paste("c =", round(c_sweep))
  names(Sigma_BL) <- paste("c =", round(c_sweep))
  return(list(mu = mu_BL, Sigma = Sigma_BL))
}
c_sweep <- seq(0, T_trn/4, length.out = 10)
moments_sm_BL <- compute_BL(pi = mu_sm, c_sweep)
moments_capm_BL <- compute_BL(pi = mu_capm, c_sweep)
names(moments_sm_BL)
#> [1] "mu"    "Sigma"
print(moments_sm_BL$mu[, 1:5])
#>              c = 0        c = 29        c = 59        c = 88       c = 117
#> AAPL  0.0008904397  0.0008837948  0.0008774997  0.0008715273  0.0008658536
#> AMD  -0.0008874288 -0.0008780388 -0.0008691431 -0.0008607035 -0.0008526859
#> ADI   0.0005617340  0.0005566242  0.0005517833  0.0005471907  0.0005428277
#> AEZS -0.0012899591 -0.0012947300 -0.0012992499 -0.0013035379 -0.0013076115
#> A     0.0005666259  0.0005556360  0.0005452245  0.0005353470  0.0005259633
#> APD   0.0004674851  0.0004604106  0.0004537084  0.0004473499  0.0004413093
#> AA   -0.0002731132 -0.0002849243 -0.0002961137 -0.0003067294 -0.0003168143
#> CF    0.0011137992  0.0010763992  0.0010409677  0.0010073531  0.0009754193
names(moments_sm_BL$Sigma)
#>  [1] "c = 0"   "c = 29"  "c = 59"  "c = 88"  "c = 117" "c = 147" "c = 176" "c = 205" "c = 235" "c = 264"
print(moments_sm_BL$Sigma[[1]])
#>              AAPL          AMD          ADI         AEZS            A          APD           AA           CF
#> AAPL 3.044635e-04 0.0001949531 0.0001099568 6.153758e-05 0.0001438829 9.058085e-05 0.0001498913 0.0001505387
#> AMD  1.949531e-04 0.0010487974 0.0002788807 2.231771e-04 0.0003246662 1.993282e-04 0.0003429236 0.0002595810
#> ADI  1.099568e-04 0.0002788807 0.0002446125 1.479889e-04 0.0002107469 1.257414e-04 0.0001971688 0.0001560117
#> AEZS 6.153758e-05 0.0002231771 0.0001479889 3.335987e-03 0.0001920193 1.118835e-04 0.0001845851 0.0001312048
#> A    1.438829e-04 0.0003246662 0.0002107469 1.920193e-04 0.0004599041 1.846209e-04 0.0002833913 0.0002236858
#> APD  9.058085e-05 0.0001993282 0.0001257414 1.118835e-04 0.0001846209 1.978203e-04 0.0001848751 0.0001532401
#> AA   1.498913e-04 0.0003429236 0.0001971688 1.845851e-04 0.0002833913 1.848751e-04 0.0004660323 0.0002628625
#> CF   1.505387e-04 0.0002595810 0.0001560117 1.312048e-04 0.0002236858 1.532401e-04 0.0002628625 0.0005782824

Finally, let’s compare the different estimates of \(\boldsymbol{\mu}\) on the test data:

mu_true <- colMeans(X_tst)
error_sm_BL <- colSums((moments_sm_BL$mu - mu_true)^2)
print(error_sm_BL)
#>        c = 0       c = 29       c = 59       c = 88      c = 117      c = 147      c = 176      c = 205 
#> 3.712095e-05 3.695123e-05 3.679393e-05 3.664783e-05 3.651187e-05 3.638510e-05 3.626668e-05 3.615588e-05 
#>      c = 235      c = 264 
#> 3.605203e-05 3.595455e-05
plot(c_sweep, error_sm_BL, xlab = "c", type = "b", 
     main = "Estimation error in mu based on BL with sample mean")

error_capm_BL <- colSums((moments_capm_BL$mu - mu_true)^2)
print(error_capm_BL)
#>        c = 0       c = 29       c = 59       c = 88      c = 117      c = 147      c = 176      c = 205 
#> 6.635839e-05 6.624785e-05 6.614470e-05 6.604826e-05 6.595792e-05 6.587313e-05 6.579343e-05 6.571838e-05 
#>      c = 235      c = 264 
#> 6.564761e-05 6.558077e-05
plot(c_sweep, error_capm_BL, xlab = "c", type = "b", 
     main = "Estimation error in mu based on BL with CAPM")

We can further design some portfolios for comparison:

library(PerformanceAnalytics)
library(CVXR)
portfolioMarkowitz <- function(mu, Sigma, lmd = 0.5) {
  w <- Variable(nrow(Sigma))
  prob <- Problem(Maximize(t(mu) %*% w - lmd*quad_form(w, Sigma)),
                  constraints = list(w >= 0, sum(w) == 1))
  result <- solve(prob)
  return(as.vector(result$getValue(w)))
}

# portfolio based on BL with sample mean
w_sm_BL <- NULL
for (i in 1:length(c_sweep))
  w_sm_BL <- cbind(w_sm_BL, portfolioMarkowitz(moments_sm_BL$mu[, i], moments_sm_BL$Sigma[[i]]))
colnames(w_sm_BL) <- colnames(moments_sm_BL$mu)
rownames(w_sm_BL) <- rownames(moments_sm_BL$mu)
w_sm_BL
#>              c = 0        c = 29       c = 59       c = 88       c = 117       c = 147       c = 176
#> AAPL  3.513758e-01  4.042370e-01 4.543175e-01 5.018311e-01  5.469702e-01  5.899085e-01  6.308031e-01
#> AMD   1.630741e-23  2.657562e-22 7.913191e-23 1.559449e-24 -3.473875e-23 -1.694491e-22 -1.461988e-22
#> ADI   3.251674e-23  3.935101e-23 3.456785e-23 1.120081e-22 -1.957605e-22 -3.599283e-23  1.290363e-22
#> AEZS -1.228065e-22  1.415532e-23 1.455599e-22 1.228475e-22 -1.302054e-22  1.522260e-23  1.879610e-23
#> A     2.427663e-23 -9.538184e-24 2.443414e-23 2.231858e-22 -1.489882e-22 -4.186811e-24  1.227035e-22
#> APD  -4.135510e-23  9.793576e-23 1.147499e-23 1.119455e-22 -3.159119e-23 -4.220047e-24  1.813621e-22
#> AA    4.327818e-23 -1.697105e-22 9.423570e-23 2.232611e-22  1.770059e-22 -1.651356e-22  6.101175e-23
#> CF    6.486242e-01  5.957630e-01 5.456825e-01 4.981689e-01  4.530298e-01  4.100915e-01  3.691969e-01
#>            c = 205       c = 235       c = 264
#> AAPL  6.697965e-01  7.070182e-01  7.425864e-01
#> AMD  -1.712376e-22  1.678756e-22 -1.960686e-22
#> ADI   2.989142e-23  7.329101e-23 -7.359751e-23
#> AEZS  8.202795e-24 -1.250117e-22  2.788424e-24
#> A     9.461145e-23 -4.410322e-23 -1.041303e-22
#> APD   8.763577e-23  1.462691e-23 -4.482203e-23
#> AA   -1.755150e-22 -5.022895e-23 -2.158990e-22
#> CF    3.302035e-01  2.929818e-01  2.574136e-01
chart.StackedBar(t(w_sm_BL), ylab = "w", space=0, border = NA,
                 main = "Portfolio (based on BL with sample mean) as a function of uncertainty")

# portfolio based on BL with CAPM
w_capm_BL <- NULL
for (i in 1:length(c_sweep))
  w_capm_BL <- cbind(w_capm_BL, portfolioMarkowitz(moments_capm_BL$mu[, i], moments_capm_BL$Sigma[[i]]))
colnames(w_capm_BL) <- colnames(moments_capm_BL$mu)
rownames(w_capm_BL) <- rownames(moments_capm_BL$mu)
w_capm_BL
#>              c = 0        c = 29       c = 59        c = 88      c = 117       c = 147       c = 176
#> AAPL  5.235270e-01  5.321101e-01 5.363075e-01  5.378880e-01 5.383620e-01  5.388128e-01  5.392421e-01
#> AMD   6.715888e-02  4.140805e-02 1.705311e-02 -2.121208e-23 2.852196e-24 -4.650190e-24  2.814770e-23
#> ADI   1.902309e-02  3.192841e-02 3.928207e-02  4.138252e-02 3.451269e-02  2.797788e-02  2.175417e-02
#> AEZS  2.098419e-01  2.103462e-01 2.106853e-01  2.109606e-01 2.111915e-01  2.114112e-01  2.116204e-01
#> A     1.043990e-01  1.261774e-01 1.441731e-01  1.592484e-01 1.709179e-01  1.820183e-01  1.925904e-01
#> APD  -3.282820e-23 -1.114579e-22 7.138684e-03  1.363380e-02 2.098995e-02  2.798720e-02  3.465118e-02
#> AA    4.880616e-02  4.948556e-02 4.536027e-02  3.688662e-02 2.402597e-02  1.179261e-02  1.417498e-04
#> CF    2.724398e-02  8.544258e-03 1.521846e-23 -2.971673e-24 2.956247e-23 -4.557855e-23 -6.793161e-24
#>            c = 205       c = 235       c = 264
#> AAPL  5.386077e-01  5.379892e-01  5.366992e-01
#> AMD  -2.938042e-23  2.467687e-23 -3.060916e-23
#> ADI   1.309333e-02  4.792461e-03 -6.581956e-23
#> AEZS  2.117291e-01  2.118318e-01  2.118643e-01
#> A     1.996219e-01  2.062964e-01  2.117880e-01
#> APD   3.694791e-02  3.909012e-02  3.964852e-02
#> AA   -5.557612e-23 -1.162866e-22 -3.583797e-24
#> CF   -3.114394e-23 -2.087274e-23 -7.319768e-23
chart.StackedBar(t(w_capm_BL), ylab = "w", space=0, border = NA,
                 main = "Portfolio (based on BL with CAPM) as a function of uncertainty")

# compute returns of all portfolios
ret_sm_BL <- xts(X %*% w_sm_BL, index(X))
ret_sm_BL_trn <- ret_sm_BL[1:T_trn, ]
ret_sm_BL_tst <- ret_sm_BL[-c(1:T_trn), ]
ret_capm_BL <- xts(X %*% w_capm_BL, index(X))
ret_capm_BL_trn <- ret_capm_BL[1:T_trn, ]
ret_capm_BL_tst <- ret_capm_BL[-c(1:T_trn), ]
# performance
table.AnnualizedReturns(ret_sm_BL_trn)
#>                            c = 0 c = 29 c = 59 c = 88 c = 117 c = 147 c = 176 c = 205 c = 235 c = 264
#> Annualized Return         0.2421 0.2419 0.2412 0.2401  0.2387  0.2370  0.2350  0.2329  0.2306  0.2282
#> Annualized Std Dev        0.2966 0.2871 0.2792 0.2727  0.2676  0.2636  0.2608  0.2589  0.2579  0.2577
#> Annualized Sharpe (Rf=0%) 0.8163 0.8424 0.8637 0.8803  0.8919  0.8989  0.9013  0.8996  0.8943  0.8857
table.AnnualizedReturns(ret_sm_BL_tst)
#>                            c = 0 c = 29 c = 59 c = 88 c = 117 c = 147 c = 176 c = 205 c = 235 c = 264
#> Annualized Return         0.0271 0.0425 0.0570 0.0706  0.0834  0.0955  0.1069  0.1177  0.1280  0.1376
#> Annualized Std Dev        0.2389 0.2313 0.2252 0.2205  0.2171  0.2148  0.2135  0.2132  0.2136  0.2148
#> Annualized Sharpe (Rf=0%) 0.1136 0.1838 0.2530 0.3201  0.3843  0.4447  0.5008  0.5523  0.5990  0.6409
table.AnnualizedReturns(ret_capm_BL_trn)
#>                            c = 0 c = 29 c = 59 c = 88 c = 117 c = 147 c = 176 c = 205 c = 235 c = 264
#> Annualized Return         0.0132 0.0208 0.0300 0.0384  0.0409  0.0433  0.0456  0.0453  0.0450  0.0448
#> Annualized Std Dev        0.2903 0.2890 0.2877 0.2869  0.2871  0.2873  0.2875  0.2880  0.2884  0.2887
#> Annualized Sharpe (Rf=0%) 0.0453 0.0719 0.1044 0.1338  0.1425  0.1507  0.1585  0.1574  0.1561  0.1552
table.AnnualizedReturns(ret_capm_BL_tst)
#>                             c = 0  c = 29  c = 59  c = 88 c = 117 c = 147 c = 176 c = 205 c = 235 c = 264
#> Annualized Return         -0.2856 -0.2806 -0.2761 -0.2729 -0.2719 -0.2710 -0.2701 -0.2706 -0.2711 -0.2715
#> Annualized Std Dev         0.3438  0.3438  0.3435  0.3432  0.3430  0.3428  0.3428  0.3430  0.3432  0.3433
#> Annualized Sharpe (Rf=0%) -0.8307 -0.8162 -0.8037 -0.7953 -0.7929 -0.7905 -0.7882 -0.7891 -0.7901 -0.7910

chart.CumReturns(ret_sm_BL_tst, 
            main = "Portfolios (based on BL with sample mean) as a function of uncertainty of views", 
            wealth.index = TRUE, legend.loc = "topleft", colorset = rich10equal)

chart.CumReturns(ret_capm_BL_tst, 
            main = "Portfolios (based on BL with CAPM) as a function of uncertainty of views", 
            wealth.index = TRUE, legend.loc = "topleft", colorset = rich10equal)