From c71423f1c01a5ae72b5f33e4370231e24b46f1d3 Mon Sep 17 00:00:00 2001 From: Tyler Pike Date: Thu, 22 Jul 2021 16:37:26 -0400 Subject: [PATCH] Version 1.2.0 --- .Rbuildignore | 1 + DESCRIPTION | 6 +- NAMESPACE | 3 + NEWS.md | 14 +- R/analysis_wrappers.R | 264 +++++ R/development_test.R | 803 ------------- R/helper.R | 98 ++ R/lp.R | 15 +- R/plot_irf.R | 4 +- R/regimes.R | 1 - R/sovle_b.R | 218 ++++ R/var.R | 213 ++-- R/var_fevd.R | 105 +- R/var_hd.R | 64 +- R/var_irf.R | 778 ++++++------ README.md | 30 +- _pkgdown.yml | 11 +- cran-comments.md | 15 + development_test.R | 1047 ----------------- docs/LICENSE.html | 2 +- docs/authors.html | 2 +- docs/index.html | 6 +- docs/reference/FEVD.html | 264 +++++ docs/reference/HD.html | 256 ++++ docs/reference/LP.html | 9 +- docs/reference/RLP.html | 23 +- docs/reference/RVAR.html | 102 +- docs/reference/VAR.html | 18 +- docs/reference/index.html | 40 +- docs/reference/irf.html | 291 +++++ docs/reference/lp_irf.html | 2 +- docs/reference/rvar_fevd.html | 16 +- docs/reference/rvar_hd.html | 22 +- docs/reference/rvar_irf.html | 60 +- docs/reference/var_fevd.html | 16 +- docs/reference/var_hd.html | 20 +- docs/reference/var_irf.html | 44 +- docs/to-do.html | 104 +- man/FEVD.Rd | 63 + man/HD.Rd | 59 + man/IRF.Rd | 86 ++ man/LP.Rd | 5 + man/RLP.Rd | 14 +- man/RVAR.Rd | 82 +- man/VAR.Rd | 8 +- man/rvar_fevd.Rd | 3 +- man/rvar_hd.Rd | 6 +- man/rvar_irf.Rd | 38 +- man/var_fevd.Rd | 3 +- man/var_hd.Rd | 5 +- man/var_irf.Rd | 25 +- tests/testthat/Rplots.pdf | Bin 29928 -> 20113 bytes tests/testthat/test-lp.R | 1 + ...test-threvhold_var.R => test-regime_var.R} | 4 +- tests/testthat/test-structure.R | 197 ++++ tests/testthat/test-var.R | 4 +- vignettes/getting_started.Rmd | 4 +- 57 files changed, 2884 insertions(+), 2710 deletions(-) create mode 100644 R/analysis_wrappers.R delete mode 100644 R/development_test.R create mode 100644 R/helper.R create mode 100644 R/sovle_b.R create mode 100644 cran-comments.md delete mode 100644 development_test.R create mode 100644 docs/reference/FEVD.html create mode 100644 docs/reference/HD.html create mode 100644 docs/reference/irf.html create mode 100644 man/FEVD.Rd create mode 100644 man/HD.Rd create mode 100644 man/IRF.Rd rename tests/testthat/{test-threvhold_var.R => test-regime_var.R} (96%) create mode 100644 tests/testthat/test-structure.R diff --git a/.Rbuildignore b/.Rbuildignore index 0db7381..442bcbb 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -9,3 +9,4 @@ ^to-do.md$ ^cran-comments.md$ ^CRAN-RELEASE$ +^development_test.R$ \ No newline at end of file diff --git a/DESCRIPTION b/DESCRIPTION index ee1d74c..3363496 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: sovereign Title: State-Dependent Empirical Analysis -Version: 1.1.0 +Version: 1.2.0 Authors@R: person(given = "Tyler J.", family = "Pike", @@ -19,7 +19,9 @@ Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Imports: broom, - dplyr, + dplyr, + future, + furrr, ggplot2, gridExtra, lmtest, diff --git a/NAMESPACE b/NAMESPACE index f3087a4..dcf1a6e 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,6 +1,9 @@ # Generated by roxygen2: do not edit by hand export("%>%") +export(FEVD) +export(HD) +export(IRF) export(LP) export(RLP) export(RVAR) diff --git a/NEWS.md b/NEWS.md index 43239dc..569de0b 100644 --- a/NEWS.md +++ b/NEWS.md @@ -12,12 +12,18 @@ - no structural restrictions, - short-term restrictions (via Cholesky decomposition), - instrumental variable estimation - - combination of instrument variable and short-term restricions - - The choice run 
bootstrapping routines in parallel for greater computational efficiency + - combination of instrument variable and short-term restrictions + - Parallel processing for greater computational efficiency in computing bootstrapped confidence intervals + +- Minor updates + - IRF plots use darker blue for CI + - Analysis function wrappers implemented for IRFs, FEVDs, and HDs + - Updated model documentation with academic references and corrected minor errors - Bug fixes - plot_irf() plots all targets by default - - test-threvhold_var.R changed to test-threshold_var.R + - test-threvhold_var.R changed to test-regime_var.R + - Fixed IRF results format listed in documentation ## Version 1.1.0 (2021-06-01) @@ -33,7 +39,7 @@ - Clarify model specification of RVAR - Bug fixes - - Convert errorConditiont() to stop() + - Convert errorCondition() to stop() ## Version 1.0.0 (2021-04-02) diff --git a/R/analysis_wrappers.R b/R/analysis_wrappers.R new file mode 100644 index 0000000..e0658fc --- /dev/null +++ b/R/analysis_wrappers.R @@ -0,0 +1,264 @@ +#' Estimate impulse response functions +#' +#' See VAR, RVAR, LP, and RLP documentation for details +#' regarding models and structural errors. +#' +#' @param model VAR, RVAR, LP, or RLP class object +#' @param horizon int: number of periods +#' @param CI numeric vector: c(lower ci bound, upper ci bound) +#' @param bootstrap.type string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used +#' @param bootstrap.num int: number of bootstraps +#' @param bootstrap.parallel boolean: create IRF draws in parallel +#' @param bootstrap.cores int: number of cores to use in parallel processing; -1 detects and uses half the available cores +#' +#' @return data frame with columns `target`, `shock`, `horizon`, `response.lower`, `response`, `response.upper`; regime-based models return a list with a data frame per regime. 
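The long-form return format documented above is easiest to see on a fitted model. A minimal sketch, assuming only simulated series like those used throughout the package examples; the column names follow the documented return, and the closing comment simply restates the per-regime behaviour described above for regime-dependent models.

# illustration only: inspect the long-form IRF output documented above
library(dplyr)

set.seed(1)
AA = c(1:100) + rnorm(100)
BB = c(1:100) + rnorm(100)
CC = AA + BB + rnorm(100)
date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100)
Data = data.frame(date = date, AA, BB, CC)

# VAR with the default 'short' structure, then the IRF wrapper
var = sovereign::VAR(data = Data, horizon = 10, freq = 'month', p = 1)
var.irf = sovereign::IRF(var, horizon = 10, CI = c(0.1, 0.9))

# one target-shock pair across horizons, using the documented columns
var.irf %>%
  dplyr::filter(target == 'CC', shock == 'AA') %>%
  dplyr::select(horizon, response.lower, response, response.upper)

# a regime-dependent model (RVAR or RLP) instead returns a list with one such
# data frame per regime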
+#' +#' @seealso [var_irf()] +#' @seealso [rvar_irf()] +#' @seealso [lp_irf()] +#' @seealso [rlp_irf()] +#' +#' @examples +#' \donttest{ +#' +#' # simple time series +#' AA = c(1:100) + rnorm(100) +#' BB = c(1:100) + rnorm(100) +#' CC = AA + BB + rnorm(100) +#' date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) +#' Data = data.frame(date = date, AA, BB, CC) +#' +#' # estimate VAR +#' var = +#' sovereign::VAR( +#' data = Data, +#' horizon = 10, +#' freq = 'month', +#' lag.ic = 'BIC', +#' lag.max = 4 +#' ) +#' +#' # impulse response function +#' var.irf = sovereign::IRF(var) +#' +#' # local projection forecasts +#' lp = +#' sovereign::LP( +#' data = Data, +#' horizon = c(1:10), +#' lag.ic = 'AIC', +#' lag.max = 4, +#' type = 'both', +#' freq = 'month') +#' +#' # LP impulse response function +#' lp.irf = sovereign::IRF(lp) +#' +#' } +#' +#' @export + +IRF = function( + model, # VAR, RVAR, LP, or RLP class object + horizon = 10, # int: number of periods + CI = c(0.1, 0.9), # numeric vector: c(lower ci bound, upper ci bound) + bootstrap.type = 'auto', # string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used + bootstrap.num = 100, # int: number of bootstraps + bootstrap.parallel = FALSE, # boolean: create IRF draws in parallel + bootstrap.cores = -1 # int: number of cores to use in parallel processing; -1 detects and uses half the available cores +){ + + if(class(model) == 'VAR'){ + return( + var_irf( + var = model, + horizon = horizon, + CI = CI, + bootstrap.type = bootstrap.type, + bootstrap.num = bootstrap.num, + bootstrap.parallel = bootstrap.parallel, + bootstrap.cores = bootstrap.cores + ) + ) + }else if(class(model) == 'RVAR'){ + return( + rvar_irf( + rvar = model, + horizon = horizon, + CI = CI, + bootstrap.type = bootstrap.type, + bootstrap.num = bootstrap.num, + bootstrap.parallel = bootstrap.parallel, + bootstrap.cores = bootstrap.cores + ) + ) + }else if(class(model) == 'LP'){ + return( + lp_irf( + lp = model, + CI = CI + ) + ) + }else if(class(model) == 'RLP'){ + return( + rlp_irf( + rlp = model, + CI = CI + ) + ) + }else{ + stop('model must be a sovereign VAR, RVAR, LP, or RLP class object') + } + +} + +#' Estimate forecast error variance decomposition +#' +#' Estimate the forecast error variance decomposition for VARs with +#' either short or 'IV-short' structural errors. See VAR +#' and RVAR documentation for details regarding structural errors. 
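A minimal sketch of the scaling behaviour described above, assuming the same simulated data as the package examples; the long-form column names (shock, response, horizon, error) follow the var_fevd() source later in this patch, and with scale = TRUE each shock's contribution is read as a share of the target's total forecast error.

# illustration only: check that scaled contributions are shares of total error
library(dplyr)

set.seed(1)
AA = c(1:100) + rnorm(100)
BB = c(1:100) + rnorm(100)
CC = AA + BB + rnorm(100)
date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100)
Data = data.frame(date = date, AA, BB, CC)

# FEVD requires 'short' or 'IV-short' identification; 'short' is the VAR default
var = sovereign::VAR(data = Data, horizon = 10, freq = 'month', p = 1)
var.fevd = sovereign::FEVD(var, horizon = 10, scale = TRUE)

# sum contributions over shocks by target and horizon; with scale = TRUE the
# total for each target-horizon pair should be constant
var.fevd %>%
  dplyr::group_by(response, horizon) %>%
  dplyr::summarize(total = sum(error), .groups = 'drop')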
+#' +#' @param model VAR or RVAR class object +#' @param horizon int: number of periods +#' @param scale boolean: scale variable contribution as percent of total error +#' +#' @return long-form data.frame +#' +#' @seealso [VAR()] +#' @seealso [var_fevd()] +#' @seealso [RVAR()] +#' @seealso [rvar_fevd()] +#' +#' @examples +#' \donttest{ +#' +#' # simple time series +#' AA = c(1:100) + rnorm(100) +#' BB = c(1:100) + rnorm(100) +#' CC = AA + BB + rnorm(100) +#' date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) +#' Data = data.frame(date = date, AA, BB, CC) +#' +#' # estimate VAR +#' var = +#' sovereign::VAR( +#' data = Data, +#' horizon = 10, +#' freq = 'month', +#' lag.ic = 'BIC', +#' lag.max = 4) +#' +#' # impulse response functions +#' var.irf = sovereign::IRF(var) +#' +#' # forecast error variance decomposition +#' var.fevd = sovereign::FEVD(var) +#' +#' # historical shock decomposition +#' var.hd = sovereign::HD(var) +#' +#' } +#' +#' @export + +FEVD = function( + model, # VAR or RVAR class object + horizon = 10, # int: number of periods + scale = TRUE # boolean: scale variable contribution as percent of total error +){ + + if(class(model) == 'VAR'){ + return( + var_fevd( + var = model, + horizon = horizon, + scale = scale + ) + ) + }else if(class(model) == 'RVAR'){ + return( + rvar_fevd( + rvar = model, + horizon = horizon, + scale = scale + ) + ) + }else{ + stop('model must be a sovereign VAR or RVAR class object') + } + +} + + +#------------------------------------------------------- +# Function calculate historical decomposition of shocks +#------------------------------------------------------- + +#' Estimate historical decomposition +#' +#' Estimate the historical decomposition for VARs with +#' either 'short' or 'IV-short' structural errors. See VAR +#' and RVAR documentation for details regarding structural errors. 
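A minimal sketch of the decomposition call, assuming the same simulated data as the package examples; the explicit 0/1 regime indicator is purely illustrative, and the per-regime output in the RVAR case follows the behaviour described above (per the RVAR details later in this patch, historical decompositions are not available under the pure 'IV' structure).

# illustration only: historical decomposition for a VAR and a regime-dependent VAR
set.seed(1)
AA = c(1:100) + rnorm(100)
BB = c(1:100) + rnorm(100)
CC = AA + BB + rnorm(100)
date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100)
Data = data.frame(date = date, AA, BB, CC)

# single-regime VAR: HD returns one long-form data.frame of shock contributions
var = sovereign::VAR(data = Data, horizon = 10, freq = 'month', p = 1)
var.hd = sovereign::HD(var)

# regime-dependent VAR with a user-supplied (exogenous) regime indicator:
# HD then returns regime-specific results
Data$reg = as.numeric(Data$AA > stats::median(Data$AA))
rvar = sovereign::RVAR(data = Data, horizon = 10, freq = 'month', p = 1, regime = 'reg')
rvar.hd = sovereign::HD(rvar)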
+#' +#' @param model VAR or RVAR class object +#' +#' @return long-from data.frame +#' +#' @seealso [VAR()] +#' @seealso [var_hd()] +#' @seealso [RVAR()] +#' @seealso [rvar_hd()] +#' +#' @examples +#' \donttest{ +#' +#' # simple time series +#' AA = c(1:100) + rnorm(100) +#' BB = c(1:100) + rnorm(100) +#' CC = AA + BB + rnorm(100) +#' date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) +#' Data = data.frame(date = date, AA, BB, CC) +#' +#' # estimate VAR +#' var = +#' sovereign::VAR( +#' data = Data, +#' horizon = 10, +#' freq = 'month', +#' lag.ic = 'BIC', +#' lag.max = 4) +#' +#' # impulse response functions +#' var.irf = sovereign::IRF(var) +#' +#' # forecast error variance decomposition +#' var.fevd = sovereign::FEVD(var) +#' +#' # historical shock decomposition +#' var.hd = sovereign::HD(var) +#' +#' } +#' +#' @export + +HD = function( + model # VAR or RVAR class object +){ + + if(class(model) == 'VAR'){ + return( + var_hd( + var = model + ) + ) + }else if(class(model) == 'RVAR'){ + return( + rvar_hd( + rvar = model + ) + ) + }else{ + stop('model must be a sovereign VAR or RVAR class object') + } + +} diff --git a/R/development_test.R b/R/development_test.R deleted file mode 100644 index aef04af..0000000 --- a/R/development_test.R +++ /dev/null @@ -1,803 +0,0 @@ - -TVAR = function( - data, # data.frame, matrix, ts, xts, zoo: Endogenous regressors - threshold, # string: variable to estimate threshold values - horizon = 10, # int: forecast horizons - freq = 'month', # string: frequency of data (day, week, month, quarter, year) - type = 'const', # string: type of deterministic terms to add ('none', 'const', 'trend', 'both') - p = 1, # int: lags - lag.ic = NULL, # string: information criterion to choose the optimal number of lags ('AIC' or 'BIC') - lag.max = NULL, # int: maximum number of lags to test in lag selection - structure = 'short', # string: type of structural identification strategy to use in model analysis (NULL, 'short', or 'IV') - instrument = NULL, # string: name of instrumental variable contained in the data matrix - instrumented = NULL # string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data -){ - - # function warnings - if(!is.numeric(p) | p %% 1 != 0){ - stop('p must be an integer') - } - if(!is.null(lag.ic)){ - if(!lag.ic %in% c('BIC','AIC')){ - stop("lag.ic must be either 'BIC', 'AIC', or NULL") - } - } - if(!is.null(lag.max)){ - if(lag.max %% 1 != 0){ - stop('lag.max must be an integer if IC-based lag selection is used') - } - } - if(!is.matrix(data) & !is.data.frame(data)){ - stop('data must be a matrix or data.frame') - } - if(!is.numeric(p) | p %% 1 != 0){ - stop('p must be an integer') - } - if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ - stop('horizon must be a positive integer') - } - if(!freq %in% c('day','week','month','quarter','year')){ - stop("freq must be one of the following strings: 'day','week','month','quarter','year'") - } - if(!structure %in% c('short', 'IV', 'IV-short') & !is.null(structure)){ - stop("strucutre must be one of 'strucutre', 'IV', 'IV-short', or NULL.") - } - if(!is.null(instrument)){ - if(!instrument %in% colnames(data)){ - stop("instrument must be the name of a variable found in data.") - } - } - if(!is.null(instrumented)){ - if(!instrumented %in% colnames(data)){ - stop("instrumented must be the name of a variable found in data.") - } - } - - # cast as data frame if ts, xts, or zoo object - if(stats::is.ts(data) | 
xts::is.xts(data) | zoo::is.zoo(data)){ - data = data.frame(date = zoo::index(date), data) - } - - # set aside instruments - if(!is.null(instrument)){ - data.instrument = dplyr::select(data, date, dplyr::all_of(instrument)) - data = dplyr::select(data, -dplyr::all_of(instrument)) - }else{ - data.instrument = NULL - } - - # detect variable to be instrumented - if(is.null(instrumented)){ - var_to_instrument = colnames(dplyr::select(data, -date))[1] - }else{ - var_to_instrument = instrumented - } - - - for(t in unique(data[,c(threshold)])){ - - data.threshold = data %>% - mutate(t_regime = if_else(threshold >= t, 1, 0)) - - rvar = - RVAR( - data = data.threshold, - horizon = horizon, - freq = freq, - type = type, - p = p, - lag.ic = lag.ic, - lag.max = lag.max, - regime = t_regime, - ) - - rvar.ll = rvar$ll - - } - - -} - - -VAR = function( - data, # data.frame, matrix, ts, xts, zoo: Endogenous regressors - horizon = 10, # int: forecast horizons - freq = 'month', # string: frequency of data (day, week, month, quarter, year) - type = 'const', # string: type of deterministic terms to add ('none', 'const', 'trend', 'both') - p = 1, # int: lags - lag.ic = NULL, # string: information criterion to choose the optimal number of lags ('AIC' or 'BIC') - lag.max = NULL, # int: maximum number of lags to test in lag selection - structure = 'short', # string: type of structural identification strategy to use in model analysis (NULL, 'short', or 'IV') - instrument = NULL, # string: name of instrumental variable contained in the data matrix - instrumented = NULL # string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data -){ - - # function warnings - if(!is.numeric(p) | p %% 1 != 0){ - stop('p must be an integer') - } - if(!is.null(lag.ic)){ - if(!lag.ic %in% c('BIC','AIC')){ - stop("lag.ic must be either 'BIC', 'AIC', or NULL") - } - } - if(!is.null(lag.max)){ - if(lag.max %% 1 != 0){ - stop('lag.max must be an integer if IC-based lag selection is used') - } - } - if(!is.matrix(data) & !is.data.frame(data)){ - stop('data must be a matrix or data.frame') - } - if(!is.numeric(p) | p %% 1 != 0){ - stop('p must be an integer') - } - if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ - stop('horizon must be a positive integer') - } - if(!freq %in% c('day','week','month','quarter','year')){ - stop("freq must be one of the following strings: 'day','week','month','quarter','year'") - } - if(!structure %in% c('short', 'IV', 'IV-short') & !is.null(structure)){ - stop("strucutre must be one of 'strucutre', 'IV', 'IV-short', or NULL.") - } - if(!is.null(instrument)){ - if(!instrument %in% colnames(data)){ - stop("instrument must be the name of a variable found in data.") - } - } - if(!is.null(instrumented)){ - if(!instrumented %in% colnames(data)){ - stop("instrumented must be the name of a variable found in data.") - } - } - - # cast as data frame if ts, xts, or zoo object - if(stats::is.ts(data) | xts::is.xts(data) | zoo::is.zoo(data)){ - data = data.frame(date = zoo::index(date), data) - } - - # set aside instruments - if(!is.null(instrument)){ - data.instrument = dplyr::select(data, date, dplyr::all_of(instrument)) - data = dplyr::select(data, -dplyr::all_of(instrument)) - }else{ - data.instrument = NULL - } - - # detect variable to be instrumented - if(is.null(instrumented)){ - var_to_instrument = colnames(dplyr::select(data, -date))[1] - }else{ - var_to_instrument = instrumented - } - - # VAR estimation - if(!is.null(lag.ic)){ - - ic.scores 
= vector(length = lag.max+1) - - models = c(1:lag.max) %>% - purrr::map(.f = function(p){ - - # estimate candidate model - model = - VAR_estimation( - data = data, - p = p, - horizon = horizon, - freq = freq, - type = type - ) - - # calculate IC - ic.score = - IC( - ic = lag.ic, - errors = model$residuals[[1]], - data = data, - p = p - ) - - ic.scores[p] = ic.score - - # return candidate model - return(model) - - }) - - # return IC minimizing VAR - min.ic = which.min(ic.scores) - model = models[[min.ic]] - - }else{ - - model = - VAR_estimation( - data = data, - p = p, - horizon = horizon, - freq = freq, - type = type - ) - - } - - # add structure - model$structure = structure - model$instrument = data.instrument - model$instrumented = var_to_instrument - - # assign class and return - class(model) = 'VAR' - return(model) - -} - -##################################################################### -# IRF FUNCTIONS - -solve_B = function(var, report_iv = TRUE){ - - if(is.null(var$structure) == TRUE){ - - # retrieve reduced-form residuals - data.residuals = var$residuals[[1]] - - # reduced form variance-covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(data.residuals, -date, -forecast.date))) - - B = cov.matrix - - return(B) - - }else if(var$structure == 'short'){ - - # retrieve reduced-form residuals - data.residuals = var$residuals[[1]] - - # reduced form variance-covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(data.residuals, -date, -forecast.date))) - - # take cholesky decomposition - B = t(chol(cov.matrix)) - - return(B) - - }else if(var$structure == 'IV'){ - - # retrieve reduced-form residuals - data.residuals = var$residuals[[1]] - covariates = colnames(dplyr::select(data.residuals, -date, -forecast.date)) - col_to_instrument = var$instrumented - - # retrieve instrument - data.instrument = var$instrument - instrument = colnames(dplyr::select(data.instrument, -date)) - - # combine data - data = - dplyr::inner_join(data.residuals, data.instrument, by = 'date') %>% - na.omit() - - # first stage least squares - model.first_stage = - lm(col_to_instrument ~., - data = data %>% - select(col_to_instrument = dplyr::all_of(col_to_instrument), all_of(instrument)) %>% - na.omit()) - p_hat = model.first_stage$fitted.values - - if(report_iv == TRUE){ - print(summary(model.first_stage)) - } - - # second stage least squares - # to estimate first column of B - # (automatically scales first entry to 1) - instrumented_shocks = covariates %>% - purrr::map_df(.f = function(X){ - - model.second_stage = lm(data[,X] ~ p_hat) - second_stage_beta = model.second_stage$coefficients[2] - return(second_stage_beta) - - }) %>% - as.vector() - - # scale size of the shock - # see Gertler and Karadi (2015) for background - # see Cesa-Bianchi's VAR-Toolbox for MATLAB implementation - X.demean = - select(data, -date, -forecast.date, -dplyr::all_of(instrument)) %>% - mutate_all(function(X){return(X - mean(X, na.rm = T))}) %>% - as.matrix() - - sigma_b = 1/(nrow(data) - ncol(var$model$coef) + 1) * t(X.demean) %*% X.demean - - s21s11 = instrumented_shocks[2:length(covariates),] %>% as.matrix() - S11 = sigma_b[1,1] %>% as.numeric() - S21 = sigma_b[2:nrow(sigma_b),1] %>%as.vector() - S22 = sigma_b[2:nrow(sigma_b),2:ncol(sigma_b)] - - Q = (s21s11 * S11) %*% t(s21s11) - (S21 %*% t(s21s11) + as.matrix(s21s11) %*% t(S21)) + S22 - sp = sqrt( S11 - t(S21 - as.matrix(s21s11) %*% S11) %*% as.matrix(solve(Q) %*% (S21 - s21s11 * S11)) ) - - scaled_instrumented_shocks = instrumented_shocks * 
as.numeric(sp) - - # prepare B matrix - B = matrix(0, ncol = (length(covariates) - 1), nrow = length(covariates)) - B = cbind(scaled_instrumented_shocks, B) - - return(B) - - }else if(var$structure == 'IV-short'){ - - # align instrument and residuals - valid_dates = - dplyr::inner_join( - dplyr::select(na.omit(var$residuals[[1]]), date), - dplyr::select(na.omit(var$instrument), date), - by = 'date' - ) - - # set residuals matrix - residuals = var$residuals[[1]] %>% - dplyr::inner_join(valid_dates, by = 'date') %>% - dplyr::select(-date, -forecast.date) %>% - as.matrix() - - residuals = -1*residuals - - # set instrument - instrument = var$instrument %>% - data.frame() %>% - dplyr::inner_join(valid_dates, by = 'date') %>% - dplyr::select(-date) - - instrument.mean = instrument %>% - dplyr::mutate_all(mean, na.rm = T) - - # number of observations - n.obs = nrow(var$data) - - # solve for IV implied impact - pi = solve(n.obs^(-1) * t(residuals) %*% residuals) %*% - (n.obs^(-1) * t(residuals) %*% as.matrix(instrument - instrument.mean)) - - phi_sq = - (n.obs^(-1) * t(instrument - instrument.mean) %*% residuals) %*% - solve( n.obs^(-1) * t(residuals) %*% residuals ) %*% - ( n.obs^(-1) * t(residuals) %*% as.matrix(instrument - instrument.mean) ) - - B1 = - n.obs^(-1) * - ( t(residuals) %*% as.matrix(instrument - instrument.mean) ) %*% - (phi_sq)^(-0.5) - - B = matrix(ncol = ncol(residuals), nrow = ncol(residuals)) - B[,1:ncol(instrument)] = B1 - rownames(B) = colnames(B) = colnames(residuals) - - # solve for orthogonalized structural shock - model.first_stage = lm(instrument[,1] ~ residuals) - orthogonal_instrument = instrument - model.first_stage$residuals - orthogonal_instrument = orthogonal_instrument[,] / sd(orthogonal_instrument[,], na.rm = T) - - shock_matrix = matrix(nrow = nrow(residuals), ncol = ncol(residuals)) - shock_matrix[,1] = orthogonal_instrument - - # reduced form variance-covariance matrix - sigma = stats::var(residuals) - - # solve additional entries - # with a lower triangular restriction - order_sequence = c(1:ncol(residuals)) - - for(i in order_sequence){ - - Y = residuals[,i] - X = shock_matrix[,c(1:i)] - model.second_stage = lm(Y~X) - - if(i != tail(order_sequence,1)){ - - B[i,] = - c( model.second_stage$coef[-1], - sd(model.second_stage$residuals), - rep(0, length(order_sequence) - ncol(instrument) - i) ) - - shock_matrix[,i+1] = model.second_stage$residuals / sd(model.second_stage$residuals) - - }else{ - - B[i,] = model.second_stage$coef[-1] - - } - - } - - # make diagonal entries positive - shock_ordering = data.frame(residuals) %>% - select(var$instrumented, dplyr::everything()) %>% - colnames() - B.sign = diag(sign(diag(B[shock_ordering,]))) - B = B %*% B.sign - - return(B) - - }else{ - - stop('structure must be set to either NULL, "short", or "IV".') - - } - -} - -IRF = function (Phi, B, lag, structure = NULL){ - - # cast data as matrices - if (!is.matrix(Phi)) - Phi = as.matrix(Phi) - if (!is.matrix(B)) - B = as.matrix(B) - - - # set dimensions - k = nrow(Phi) - m = ncol(Phi) - p = floor(m/k) - Si = diag(rep(1, k)) - wk = c(Si) - awk = c(wk) - acuwk = c(awk) - - # set lag - if (p < 1) - p = 1 - if (lag < 1) - lag = 1 - - # estimate IRFs - for (i in 1:lag) { - if (i <= p) { - idx = (i - 1) * k - tmp = Phi[, (idx + 1):(idx + k)] - } - else { - tmp = matrix(0, k, k) - } - jj = i - 1 - jp = min(jj, p) - if (jp > 0) { - for (j in 1:jp) { - jdx = (j - 1) * k - idx = (i - j) * k - w1 = Phi[, (jdx + 1):(jdx + k)] - w2 = Si[, (idx + 1):(idx + k)] - tmp = tmp + w1 %*% w2 - } 
- } - Si = cbind(Si, tmp) - wk = cbind(wk, c(tmp)) - awk = awk + c(tmp) - acuwk = cbind(acuwk, awk) - } - orSi = NULL - wk1 = NULL - awk1 = NULL - acuwk1 = NULL - - irf = Si - - if (!is.null(structure)) { - - P = B - wk1 = cbind(wk1, c(P)) - awk1 = wk1 - acuwk1 = wk1 - orSi = cbind(orSi, P) - for (i in 1:lag) { - idx = i * k - w1 = Si[, (idx + 1):(idx + k)] - w2 = w1 %*% P - orSi = cbind(orSi, w2) - wk1 = cbind(wk1, c(w2)) - awk1 = awk1 + c(w2) - acuwk1 = cbind(acuwk1, awk1) - } - - irf = orSi - - } - - # return results - return(irf) - -} - -var_irf = function( - var, # VAR output - horizon = 10, # int: number of periods - CI = c(0.1, 0.9), # numeric vector: c(lower ci bound, upper ci bound) - bootstrap.type = 'auto', # string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used - bootstrap.num = 100, # int: number of bootstraps - bootstrap.parallel = FALSE, # boolean: create IRF draws in parallel - bootstrap.cores = -1 # int: number of cores to use in parallel processing; -1 detects and uses half the available cores -){ - - # NOTES: scaled wild is used for IV, consistent with the proxy SVAR literature, - # while standard resample is used for others - - # function warnings - if(!is.numeric(bootstrap.num) | bootstrap.num %% 1 != 0){ - stop('bootstrap.num must be an integer') - } - if(!is.numeric(CI) | length(CI) != 2 | min(CI) < 0 | max(CI) > 1 | is.na(sum(CI))){ - stop('CI must be a two element numeric vector bound [0,1]') - } - if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ - stop('horizon must be a positive integer') - } - - # function variables - y = forecast.date = shock = target = response = response.lower = response.upper = response.med = response.adjust = NULL - - # set data - coef = var$model$coef - residuals = var$residuals[[1]] - data = var$data - - # set model variables - p = var$model$p - freq = var$model$freq - structure = var$structure - - if('const' %in% colnames(coef) & 'trend' %in% colnames(coef)){ - type = 'both' - }else if('const' %in% colnames(coef)){ - type = 'const' - }else if('trend' %in% colnames(coef)){ - type = 'trend' - } - - regressors = colnames(dplyr::select(data, -date)) - if(!is.null(var$regime)){regressors = regressors[regressors != var$regime$regime]} - regressors.cols = paste0(regressors, '.l1') - - # set IRF variables - p.lower = CI[1] - p.upper = CI[2] - - ### calculate impulse responses -------------- - - # estimate error-covariance matrix or structural impact matrix - B = solve_B(var) - B = as.matrix(B) - - # estimate IRFs - phi = dplyr::select(coef, -y, -dplyr::contains('cosnt'), -dplyr::contains('trend')) - - irf = IRF(Phi = phi, - B = B, - lag = horizon, - structure = structure) - - # reorganize results - irf = data.frame(t(irf)) - - if(!is.null(structure)){ - if(structure == 'IV-short'){ - shocks = c(var$instrumented, regressors[!regressors %in% var$instrumented]) - irf$shock = rep(shocks, horizon + 1) - } - }else{ - irf$shock = rep(regressors, horizon + 1) - } - - irf$horizon = sort(rep(c(0:horizon), length(regressors))) - irf = dplyr::arrange(irf, shock, horizon) - rownames(irf) = NULL - colnames(irf) = c(regressors, 'shock', 'horizon') - - ### bootstrap IRF confidence intervals --------- - - # 0. 
set up parallel back-end - if(bootstrap.parallel == TRUE){ - if(bootstrap.cores == -1){ - n.cores = floor(future::availableCores() / 2) - }else{ - n.cores = bootstrap.parallel - } - future::plan(future::multisession, workers = n.cores) - }else{ - future::plan(future::sequential) - } - - # 1. create bootstrap time series - bagged.irf = as.list(1:bootstrap.num) %>% - furrr::future_map(.f = function(count){ - - # bootstrap residuals - if(bootstrap.type == 'wild' | - bootstrap.type == 'auto' & structure == 'IV' | - bootstrap.type == 'auto' & structure == 'IV-short'){ - - # 'wild' bootstrap technique for simple distribution - # using observed scaled residuals with random sign flip. - # See the Rademacher distribution. - U = residuals[,-c(1,2)] - r = sample(c(-1,1), size = nrow(U), replace = T) - U = sweep(U, MARGIN = 1, r, `*`) - - }else if(bootstrap.type == 'standard' | bootstrap.type == 'auto' & structure != 'IV'){ - - # standard bootstrap technique a al Lutkepohl (2005) - # draw residuals with replacement - U = na.omit(residuals) - U = U[sample(c(1:nrow(U)), - size = nrow(residuals), - replace = TRUE),] - U = U %>% - dplyr::select(-date, -forecast.date) %>% - dplyr::mutate_all(function(X){return(X-mean(X, na.rm = T))}) - } - - # recursively build synthetic data - Y = matrix(nrow = nrow(var$data), ncol = ncol(var$data)-1) - Y = data.frame(Y); colnames(Y) = regressors - Y[1:p, ] = var$data[1:p, -1] - - for(i in (p+1):nrow(var$data)){ - - X = Y[(i-p):(i-1),] - X = embed(as.matrix(X), dimension = p) - X = data.frame(X) - - if(type %in% c('const', 'both')){X$const = 1} - if(type %in% c('trend', 'both')){X$trend = c(1:nrow(X))} - - X.hat = as.matrix(coef[,-1]) %*% t(as.matrix(X)) - X.hat = t(X.hat) - - Y[i, ] = X.hat - U[i,] - - } - - Y$date = data$date - - # estimate VAR with synthetic data - var.bag = - VAR( - data = Y, - p = p, - horizon = 1, - freq = freq, - type = type, - structure = structure) - - # bootstrap instrument - if(!is.null(structure)){ - if(structure == 'IV' | structure == 'IV-short'){ - var.bag$instrument = var$instrument - var.bag$instrument[,-1] = sweep(var.bag$instrument[,-1], MARGIN = 1, r, `*`) - - var.bag$instrumented = var$instrumented - } - } - - # estimate error-covariance matrix or structural impact matrix - B.bag = solve_B(var.bag, report_iv = FALSE) - - # set bagged coef matrix - coef.bag = dplyr::select(var.bag$model$coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')) - - # estimate IRFs - irf.bag = IRF(Phi = coef.bag, - B = B.bag, - lag = horizon, - structure = structure) - - # reorganize results - irf.bag = data.frame(t(irf.bag)) - - if(!is.null(structure)){ - if(structure == 'IV-short'){ - shocks = c(var$instrumented, regressors[!regressors %in% var$instrumented]) - irf.bag$shock = rep(shocks, horizon + 1) - } - }else{ - irf.bag$shock = rep(regressors, horizon + 1) - } - - irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) - irf.bag = dplyr::arrange(irf.bag, shock, horizon) - rownames(irf.bag) = NULL - colnames(irf.bag) = c(regressors, 'shock', 'horizon') - - return(irf.bag) - - }) - - # 3. 
calculate confidence intervals - - # collapse to data.frame - ci = bagged.irf %>% - purrr::reduce(dplyr::bind_rows) - - # remove unused shocks in the case of IV - if(!is.null(var$structure)){ - if(var$structure == 'IV'){ - ci = ci %>% - dplyr::filter(shock == regressors[1]) - } - } - - # correct shock names in the case of IV-short - if(!is.null(var$structure)){ - if(var$structure == 'IV-short'){ - - } - } - - # estimate bands - ci = ci %>% - tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response') %>% - group_by(shock, horizon, target) %>% - summarize(response.lower = quantile(response, probs = p.lower, na.rm = T), - response.upper = quantile(response, probs = p.upper, na.rm = T), - response.mean = mean(response, na.rm = T) ) - - # combine point estimates and CI - irf = irf %>% - tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response') %>% - dplyr::arrange(shock, horizon) - - irf = - full_join( - irf, ci, - by = c('shock', 'target', 'horizon') - ) - - # adjust for bias in CI - # (bias can be introduced in the bootstrapping if residuals are not actually mean zero) - irf = irf %>% - dplyr::mutate( - response.adjust = response - response.mean, - response.lower = response.lower + response.adjust, - response.upper = response.upper + response.adjust - ) %>% - dplyr::select(target, shock, horizon, response.lower, response, response.upper) %>% - dplyr::arrange(target, shock, horizon) - - ### return output -------------- - return(irf) -} - - -# ##################################################################### -# TEST - -library(tidyverse) -library(lubridate) - -# Data -data.macro = read_csv('/scratch/m1tjp01/Andrea/FinancialStability/Data/Intermediate/model_monthly_macro_covariates.csv') -data.macro = data.macro %>% filter(year(date) >= 1991) -data.macro = data.macro %>% select(date, PCE, UNEMP, PR, EBP, mp_shock) - -# MODEL -var = - VAR( - data = data.macro, - horizon = 1, - freq = 'month', - p = 1, - structure = 'IV-short', - instrument = 'mp_shock', - instrumented = 'PR' - ) - -irf = var_irf(var, horizon = 20, bootstrap.type = 'wild', bootstrap.num = 10, bootstrap.parallel = TRUE) -plot_irf(irf, shocks = 'PR') - diff --git a/R/helper.R b/R/helper.R new file mode 100644 index 0000000..2685d11 --- /dev/null +++ b/R/helper.R @@ -0,0 +1,98 @@ +#------------------------------------------ +# Data helper functions +#------------------------------------------ +# create n lags +n.lag = function( + Data, # data.frame: data frame of variables to lag and a 'date' column + lags, # int: number of lags to create + variables = NULL # string: vector of variable names to lag, default is all non-date variables +){ + + # function variables + date = NULL + + if(is.null(variables)){ + variables = names(dplyr::select(Data, -dplyr::contains('date'))) + } + + if(!'date' %in% colnames(Data)){ + no.date = TRUE + Data$date = c(1:nrow(Data)) + }else{ + no.date = FALSE + } + + Data = c(0:lags) %>% + purrr::map( + .f = function(n){ + + if(n == 0){return(Data)} + + X = Data %>% + dplyr::mutate_at(variables, dplyr::lag, n) + + names(X)[names(X) != 'date'] = paste0(names(X)[names(X) != 'date'], '.l', n) + + return(X) + } + ) %>% + purrr::reduce(dplyr::full_join, by = 'date') + + if(no.date == TRUE){ + Data = dplyr::select(Data, -date) + } + + return(Data) +} + +# adjust forecast dates +forecast_date = function( + forecast.date, + horizon, + freq +){ + + date = forecast.date + + if(freq == 'day'){ + date = forecast.date + horizon + }else if(freq == 'week'){ + 
lubridate::week(date) = lubridate::week(date) + horizon + }else if(freq == 'month'){ + lubridate::month(date) = lubridate::month(date) + horizon + }else if(freq == 'quarter'){ + lubridate::month(date) = lubridate::month(date) + horizon*3 + }else if(freq == 'year'){ + lubridate::year(date) = lubridate::year(date) + horizon + } + + return(date) +} + +#------------------------------------------ +# Information criterion +#------------------------------------------ +IC = function( + ic = 'AIC', + errors, + data, + p +){ + + cov.matrix = stats::var(stats::na.omit(dplyr::select(errors, -date))) + + regressors = colnames(dplyr::select(data, -date)) + regressors.cols = paste0(regressors, '.l1') + k = length(regressors.cols) + + sample.size = nrow(stats::na.omit(data)) + + if(ic == 'AIC'){ + score = log(sum(abs(cov.matrix))) + (2*k^2*p)/sample.size + }else if(ic == 'BIC'){ + score = log(sum(abs(cov.matrix))) + (log(sample.size)*k^2*p)/sample.size + } + + return(score) + +} diff --git a/R/lp.R b/R/lp.R index 153cdbd..10537c4 100644 --- a/R/lp.R +++ b/R/lp.R @@ -170,6 +170,9 @@ LP_estimate = function( #' @seealso [RLP()] #' @seealso [rlp_irf()] #' +#' @references +#' 1. Jorda, Oscar "[Estimation and Inference of Impulse Responses by Local Projections](https://www.aeaweb.org/articles?id=10.1257/0002828053828518)" 2005. +#' #' @examples #' \donttest{ #' @@ -530,6 +533,11 @@ RLP_estimate = function( #' Estimate regime-dependent local projections #' +#' Estimate a regime-dependent local projection (i.e. a state-dependent LP), with an exogenous state indicator, of the specification: +#' \deqn{Y_{t+h} = X_t \beta_{s_t} + \epsilon_t} +#' where *t* is the time index, and *s* is a mutually exclusive state of the world observed at time *t*. When the regime vector is +#' not supplied by the user, then a two-state regime series is estimated via random forest. +#' #' @param data data.frame, matrix, ts, xts, zoo: Endogenous regressors #' @param horizons int: forecast horizons #' @param freq string: frequency of data ('day', 'week', 'month', 'quarter', or 'year') @@ -544,13 +552,18 @@ RLP_estimate = function( #' @param regime.method string: regime assignment technique ('rf', 'kmeans', 'EM', 'BP') #' @param regime.n int: number of regimes to estimate (applies to kmeans and EM) #' -#' @return list object with elements `data`, `model`, `forecasts`, `residuals`; if there is more than one forecast horizon estimated, then `model`, `forecasts`, `residuals` will each be a list where each element corresponds to a single horizon +#' @return list of lists, one list per regime, each regime with objects with elements `data`, `model`, `forecasts`, `residuals`; +#' if there is more than one forecast horizon estimated, then `model`, `forecasts`, `residuals` +#' will each be a list where each element corresponds to a single horizon #' #' @seealso [LP()] #' @seealso [lp_irf()] #' @seealso [RLP()] #' @seealso [rlp_irf()] #' +#' @references +#' 1. Jorda, Oscar "[Estimation and Inference of Impulse Responses by Local Projections](https://www.aeaweb.org/articles?id=10.1257/0002828053828518)" 2005. 
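A minimal call sketch for the state-dependent specification described above, assuming the same simulated data as the package examples; the explicit 0/1 regime column stands in for the random-forest assignment mentioned in the description, and the argument names follow the @param list above.

# illustration only: regime-dependent local projection with an exogenous regime
set.seed(1)
AA = c(1:100) + rnorm(100)
BB = c(1:100) + rnorm(100)
CC = AA + BB + rnorm(100)
date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100)
Data = data.frame(date = date, AA, BB, CC)

# two-state regime indicator observed at time t (illustrative assignment)
Data$reg = as.numeric(Data$AA > stats::median(Data$AA))

# the `horizons` argument name follows the @param documentation above
rlp =
  sovereign::RLP(
    data = Data,
    horizons = c(1:10),
    freq = 'month',
    regime = 'reg')

# impulse responses, one set per regime
rlp.irf = sovereign::IRF(rlp)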
+#' #' @examples #' \donttest{ #' diff --git a/R/plot_irf.R b/R/plot_irf.R index 1cf4c42..8acc1b8 100644 --- a/R/plot_irf.R +++ b/R/plot_irf.R @@ -34,8 +34,8 @@ plot_individual_irf = function( irf.plot <- plotdata %>% ggplot2::ggplot(ggplot2::aes(x=horizon, y=response, ymin=response.lower, ymax=response.upper)) + ggplot2::geom_hline(yintercept = 0, color="black") + - ggplot2::geom_ribbon(fill="lightblue", alpha=0.2) + - ggplot2::geom_line(color = 'darkred', lwd = 1.25) + + ggplot2::geom_ribbon(fill="royalblue", alpha=0.2) + + ggplot2::geom_line(color = 'darkred', lwd = 1) + ggplot2::theme_light() + ggplot2::ggtitle(title)+ ggplot2::ylab(ylab)+ diff --git a/R/regimes.R b/R/regimes.R index d0a3dc4..a5c261e 100644 --- a/R/regimes.R +++ b/R/regimes.R @@ -35,7 +35,6 @@ #' } #' #' @export - regimes = function( data, # data.frame, matrix, ts, xts, zoo: Endogenous regressors method = 'rf', # string: regime assignment technique ('rf', 'kmeans', 'EM', 'BP') diff --git a/R/sovle_b.R b/R/sovle_b.R new file mode 100644 index 0000000..0ac9276 --- /dev/null +++ b/R/sovle_b.R @@ -0,0 +1,218 @@ + +#------------------------------------------ +# Function to estimate the structural +# error impact matrix +# +# See Cesa-Bianchi's VAR-Toolbox for +# MATLAB implementation +# +# IV-short implementation based on matlab +# code by Nick von Turkovich and Paco +# Vazquez-Grande +#------------------------------------------ +solve_B = function(var, report_iv = TRUE){ + + # function variables + forecast.date = NA + + # set residuals + if(class(var) == 'VAR'){ + residuals = var$residuals[[1]] + }else{ + residuals = var$residuals + } + + if(is.na(var$structure) == TRUE){ + + # retrieve reduced-form residuals + data.residuals = residuals + + # reduced form variance-covariance matrix + cov.matrix = stats::var(stats::na.omit(dplyr::select(data.residuals, -date, -forecast.date))) + + B = cov.matrix + + return(B) + + }else if(var$structure == 'short'){ + + # retrieve reduced-form residuals + data.residuals = residuals + + # reduced form variance-covariance matrix + cov.matrix = stats::var(stats::na.omit(dplyr::select(data.residuals, -date, -forecast.date))) + + # take cholesky decomposition + B = t(chol(cov.matrix)) + + return(B) + + }else if(var$structure == 'IV'){ + + # retrieve reduced-form residuals + data.residuals = residuals + covariates = colnames(dplyr::select(data.residuals, -date, -forecast.date)) + col_to_instrument = var$instrumented + + # retrieve instrument + data.instrument = var$instrument + instrument = colnames(dplyr::select(data.instrument, -date)) + + # combine data + data = + dplyr::inner_join(data.residuals, data.instrument, by = 'date') %>% + stats::na.omit() + + # first stage least squares + model.first_stage = + stats::lm(col_to_instrument ~., + data = data %>% + dplyr::select(col_to_instrument = dplyr::all_of(col_to_instrument), dplyr::all_of(instrument)) %>% + stats::na.omit()) + p_hat = model.first_stage$fitted.values + + if(report_iv == TRUE){ + print(summary(model.first_stage)) + } + + # second stage least squares + # to estimate first column of B + # (automatically scales first entry to 1) + instrumented_shocks = covariates %>% + purrr::map_df(.f = function(X){ + + model.second_stage = stats::lm(data[,X] ~ p_hat) + second_stage_beta = model.second_stage$coefficients[2] + return(second_stage_beta) + + }) %>% + as.vector() + + # scale size of the shock + # see Gertler and Karadi (2015) for background + # see Cesa-Bianchi's VAR-Toolbox for MATLAB implementation + X.demean = + dplyr::select(data, 
-date, -forecast.date, -dplyr::all_of(instrument)) %>% + dplyr::mutate_all(function(X){return(X - mean(X, na.rm = T))}) %>% + as.matrix() + + sigma_b = 1/(nrow(data) - ncol(var$model$coef) + 1) * t(X.demean) %*% X.demean + + s21s11 = instrumented_shocks[2:length(covariates),] %>% as.matrix() + S11 = sigma_b[1,1] %>% as.numeric() + S21 = sigma_b[2:nrow(sigma_b),1] %>%as.vector() + S22 = sigma_b[2:nrow(sigma_b),2:ncol(sigma_b)] + + Q = (s21s11 * S11) %*% t(s21s11) - (S21 %*% t(s21s11) + as.matrix(s21s11) %*% t(S21)) + S22 + sp = sqrt( S11 - t(S21 - as.matrix(s21s11) %*% S11) %*% as.matrix(solve(Q) %*% (S21 - s21s11 * S11)) ) + + scaled_instrumented_shocks = instrumented_shocks * as.numeric(sp) + + # prepare B matrix + B = matrix(0, ncol = (length(covariates) - 1), nrow = length(covariates)) + B = cbind(scaled_instrumented_shocks, B) + + return(B) + + }else if(var$structure == 'IV-short'){ + + # align instrument and residuals + valid_dates = + dplyr::inner_join( + dplyr::select(stats::na.omit(residuals), date), + dplyr::select(stats::na.omit(var$instrument), date), + by = 'date' + ) + + # set residuals matrix + residuals = residuals %>% + dplyr::inner_join(valid_dates, by = 'date') %>% + dplyr::select(-date, -forecast.date) %>% + as.matrix() + + residuals = -1*residuals + + # set instrument + instrument = var$instrument %>% + data.frame() %>% + dplyr::inner_join(valid_dates, by = 'date') %>% + dplyr::select(-date) + + instrument.mean = instrument %>% + dplyr::mutate_all(mean, na.rm = T) + + # number of observations + n.obs = nrow(residuals) + + # solve for IV implied impact + pi = solve(n.obs^(-1) * t(residuals) %*% residuals) %*% + (n.obs^(-1) * t(residuals) %*% as.matrix(instrument - instrument.mean)) + + phi_sq = + (n.obs^(-1) * t(instrument - instrument.mean) %*% residuals) %*% + solve( n.obs^(-1) * t(residuals) %*% residuals ) %*% + ( n.obs^(-1) * t(residuals) %*% as.matrix(instrument - instrument.mean) ) + + B1 = + n.obs^(-1) * + ( t(residuals) %*% as.matrix(instrument - instrument.mean) ) %*% + (phi_sq)^(-0.5) + + B = matrix(ncol = ncol(residuals), nrow = ncol(residuals)) + B[,1:ncol(instrument)] = B1 + rownames(B) = colnames(B) = colnames(residuals) + + # solve for orthogonalized structural shock + model.first_stage = stats::lm(instrument[,1] ~ residuals) + orthogonal_instrument = instrument - model.first_stage$residuals + orthogonal_instrument = orthogonal_instrument[,] / stats::sd(orthogonal_instrument[,], na.rm = T) + + shock_matrix = matrix(nrow = nrow(residuals), ncol = ncol(residuals)) + shock_matrix[,1] = orthogonal_instrument + + # reduced form variance-covariance matrix + sigma = stats::var(residuals) + + # solve additional entries + # with a lower triangular restriction + order_sequence = c(1:ncol(residuals)) + + for(i in order_sequence){ + + Y = residuals[,i] + X = shock_matrix[,c(1:i)] + model.second_stage = stats::lm(Y~X) + + if(i != utils::tail(order_sequence,1)){ + + B[i,] = + c( model.second_stage$coef[-1], + stats::sd(model.second_stage$residuals), + rep(0, length(order_sequence) - ncol(instrument) - i) ) + + shock_matrix[,i+1] = model.second_stage$residuals / stats::sd(model.second_stage$residuals) + + }else{ + + B[i,] = model.second_stage$coef[-1] + + } + + } + + # make diagonal entries positive + shock_ordering = data.frame(residuals) %>% + dplyr::select(var$instrumented, dplyr::everything()) %>% + colnames() + B.sign = diag(sign(diag(B[shock_ordering,]))) + B = B %*% B.sign + + return(B) + + }else{ + + stop('structure must be set to either NA, "short", "IV", or 
"IV-short".') + + } + +} diff --git a/R/var.R b/R/var.R index 1ad7879..6829c1c 100644 --- a/R/var.R +++ b/R/var.R @@ -51,7 +51,7 @@ VAR_estimation = function( se = broom::tidy(model) %>% dplyr::select(term, std.error) se$y = target - ll = stats::logLik(model) + ll = stats::logLik(model)[1] # return results return(list(coef = c, se = se, ll = ll)) @@ -73,7 +73,7 @@ VAR_estimation = function( # extract log likelihood ll = purrr::map(models, .f = function(X){return(X$ll)}) %>% - purrr::reduce(dplyr::bind_rows) %>% + purrr::reduce(rbind) %>% sum() # package for return @@ -176,18 +176,18 @@ VAR_estimation = function( #' @param p int: lags #' @param lag.ic string: information criterion to choose the optimal number of lags ('AIC' or 'BIC') #' @param lag.max int: maximum number of lags to test in lag selection -#' @param structure string: type of structural identification strategy to use in model analysis (NULL, 'short', 'IV', or 'IV-short') +#' @param structure string: type of structural identification strategy to use in model analysis (NA, 'short', 'IV', or 'IV-short') #' @param instrument string: name of instrumental variable contained in the data matrix #' @param instrumented string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data #' #' @return #' 1. data: data.frame with endogenous variables and 'date' column. -#' 2. model: list with data.frame of model coefficients (in psuedo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of type +#' 2. model: list with data.frame of model coefficients (in psuedo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of deterministic term type, numeric of log-likelihood #' 3. forecasts: list of data.frames per horizon; data.frame with column for date (day the forecast was made), forecast.date (the date being forecasted), target (variable forecasted), and forecast #' 4. residuals: list of data.frames per horizon; data.frame of residuals -#' 5. structure: string denoting which structural identification strategy will be used in analysis (or NULL) +#' 5. structure: string denoting which structural identification strategy will be used in analysis (or NA) #' 6. instrument: data.frame with 'date' column and 'instrument' column (or NULL) -#' 7. instrumented: string denoting which column will be instrumted in 'IV' and 'IV-short' strategies (or NULL) +#' 7. 
instrumented: string denoting which column will be instrumented in 'IV' and 'IV-short' strategies (or NA) #' #' @seealso [VAR()] #' @seealso [var_irf()] @@ -280,8 +280,8 @@ VAR = function( if(!freq %in% c('day','week','month','quarter','year')){ stop("freq must be one of the following strings: 'day','week','month','quarter','year'") } - if(!structure %in% c('short', 'IV', 'IV-short') & !is.null(structure)){ - stop("strucutre must be one of 'strucutre', 'IV', 'IV-short', or NULL.") + if(!structure %in% c(NA, 'short', 'IV', 'IV-short')){ + stop("structure must be one of 'short, 'IV', 'IV-short', or NA") } if(!is.null(instrument)){ if(!instrument %in% colnames(data)){ @@ -308,7 +308,7 @@ VAR = function( } # detect variable to be instrumented - if(is.null(instrumented)){ + if(is.null(instrumented) & structure %in% c('IV', 'IV-short')){ var_to_instrument = colnames(dplyr::select(data, -date))[1] }else{ var_to_instrument = instrumented @@ -391,28 +391,6 @@ RVAR_estimate = function( type = 'const' # string: type of deterministic terms to add ('none', 'const', 'trend', 'both') ){ - # function warnings - if(!is.matrix(data) & !is.data.frame(data)){ - stop('data must be a matrix or data.frame') - } - if(!is.numeric(p) | p %% 1 != 0){ - stop('p must be an integer') - } - if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ - stop('horizon must be a positive integer') - } - if(!freq %in% c('day','week','month','quarter','year')){ - stop("freq must be one of the following strings: 'day','week','month','quarter','year'") - } - if(!regime %in% colnames(data)){ - stop('regime must be the name of a column in data') - } - - # cast as data frame if ts, xts, or zoo object - if(stats::is.ts(data) | xts::is.xts(data) | zoo::is.zoo(data)){ - data = data.frame(date = zoo::index(date), data) - } - # function variables term = estimate = std.error = model.regime = NULL @@ -458,11 +436,16 @@ RVAR_estimate = function( c = broom::tidy(model) %>% dplyr::select(term, coef = estimate) c$y = target + # standard errors se = broom::tidy(model) %>% dplyr::select(term, std.error) se$y = target + # log likelihood + ll = stats::logLik(model)[1] + # return results - return(list(coef = c, se = se)) + return(list(coef = c, se = se, ll = ll)) + }) # extract coefficients @@ -472,14 +455,20 @@ RVAR_estimate = function( tidyr::pivot_wider(values_from = coef, names_from = term) - # extract coefficients + # extract coefficients' standard errors se = purrr::map(models, .f = function(X){return(X$se)}) %>% purrr::reduce(dplyr::bind_rows) %>% tidyr::pivot_wider(values_from = std.error, names_from = term) + # extract log likelihood + ll = + purrr::map(models, .f = function(X){return(X$ll)}) %>% + purrr::reduce(rbind) %>% + sum() + # package for return - model = list(coef = coef, se = se, p = p, freq = freq, horizon = horizon, regime = regime.val) + model = list(coef = coef, se = se, p = p, freq = freq, type = type, ll = ll, horizon = horizon, regime = regime.val) }) @@ -586,20 +575,20 @@ RVAR_estimate = function( ) ) - }) + }) ### Organize and return output ------- forecasts = as.list(c(1:horizon)) %>% purrr::map(.f = function(h){ - f = fr %>% - purrr::map(.f = function(r){ - return(r$forecast[[paste0('H_',h)]]) - }) %>% - purrr::reduce(dplyr::bind_rows) %>% - dplyr::arrange(date) + f = fr %>% + purrr::map(.f = function(r){ + return(r$forecast[[paste0('H_',h)]]) + }) %>% + purrr::reduce(dplyr::bind_rows) %>% + dplyr::arrange(date) - }) + }) residuals = as.list(c(1:horizon)) %>% @@ -629,18 +618,15 @@ RVAR_estimate = function( ) } -#' 
Estimate regime-dependent VAR + +#' Estimate regime-dependent VAR, SVAR, or Proxy-SVAR #' #' Estimate a regime-dependent VAR (i.e. a state-dependent VAR), with an exogenous state indicator, of the specification: -#' \deqn{Y_t = X \beta_s + \epsilon_t} -#' where t is the time index, Y is the set of outcome vectors, X the design matrix (of p lagged values of Y), and -#' s is a mutually exclusive state of the world observed at time t-1. When the regime vector is not supplied by the user, then a two-state +#' \deqn{Y_{t+1} = X_t \beta_{s_t} + \epsilon_t} +#' where *t* is the time index, *Y* is the set of outcome vectors, *X* the design matrix (of *p* lagged values of Y), and +#' *s* is a mutually exclusive state of the world observed at time *t*. When the regime vector is not supplied by the user, then a two-state #' regime series is estimated via random forest. #' -#' @details The regime-dependent VAR is a generalization of the popular threshold VAR - which trades off estimating a threshold value for an -#' endogenous variable for accepting an exogenous regime that can be based on information from inside or outside of the system, with or without parametric -#' assumptions, and with or without timing restrictions. -#' #' #' @param data data.frame, matrix, ts, xts, zoo: Endogenous regressors #' @param horizon int: forecast horizons @@ -652,17 +638,25 @@ RVAR_estimate = function( #' @param regime string: name or regime assignment vector in the design matrix (data) #' @param regime.method string: regime assignment technique ('rf', 'kmeans', 'EM', or 'BP') #' @param regime.n int: number of regimes to estimate (applies to kmeans and EM) +#' @param structure string: type of structural identification strategy to use in model analysis (NA, 'short', 'IV', or 'IV-short') +#' @param instrument string: name of instrumental variable contained in the data matrix +#' @param instrumented string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data #' -#' @return list of lists, each regime returns its own list with elements `data`, `model`, `forecasts`, `residuals` +#' @return +#' List of lists, where each regime is a list with items: +#' 1. data: data.frame with endogenous variables and 'date' column. +#' 2. model: list with data.frame of model coefficients (in psuedo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of deterministic term type, numeric of log-likelihood, regime indicator +#' 3. forecasts: list of data.frames per horizon; data.frame with column for date (day the forecast was made), forecast.date (the date being forecasted), target (variable forecasted), and forecast +#' 4. residuals: list of data.frames per horizon; data.frame of residuals +#' 5. structure: string denoting which structural identification strategy will be used in analysis (or NA) +#' 6. instrument: data.frame with 'date' column and 'instrument' column (or NULL) +#' 7. 
instrumented: string denoting which column will be instrumented in 'IV' and 'IV-short' strategies (or NULL) #' #' @seealso [VAR()] -#' @seealso [var_irf()] -#' @seealso [var_fevd()] -#' @seealso [var_hd()] #' @seealso [RVAR()] -#' @seealso [rvar_irf()] -#' @seealso [rvar_fevd()] -#' @seealso [rvar_hd()] +#' @seealso [IRF()] +#' @seealso [FEVD()] +#' @seealso [HD()] #' #' @examples #' \donttest{ @@ -686,17 +680,44 @@ RVAR_estimate = function( #' lag.ic = 'BIC', #' lag.max = 4) #' -#' # impulse response functions -#' rvar.irf = sovereign::rvar_irf(rvar) +#' # impulse response functions +#' rvar.irf = sovereign::rvar_irf(rvar) #' -#' # forecast error variance decomposition -#' rvar.fevd = sovereign::rvar_fevd(rvar) +#' # forecast error variance decomposition +#' rvar.fevd = sovereign::rvar_fevd(rvar) #' -#' # historical shock decomposition -#' rvar.hd = sovereign::rvar_hd(rvar) +#' # historical shock decomposition +#' rvar.hd = sovereign::rvar_hd(rvar) #' #' } #' +#' @details +#' The regime-dependent VAR is a generalization of the popular threshold VAR - which trades off estimating a threshold value for an +#' endogenous variable for accepting an exogenous regime that can be based on information from inside or outside of the system, with or without parametric +#' assumptions, and with or without timing restrictions. Moreover, the RVAR may be extended to include structural shocks, including the use of instrumental +#' variables. +#' +#' **State dependence.** The RVAR augments the traditional VAR by allowing state-dependence in the coefficient matrix. The RVAR differs from the more common threshold VAR (TVAR), due +#' to the fact that states are exogenously determined. As a result, the states (i.e. regimes) do not need to be based on information inside the model, moreover, regimes can be +#' determined by any process the user determines best fits their needs. For example, regimes based on NBER dated recessions and expansions are based on a judgmental process +#' considering hundreds of series, potentially none of which are in the VAR being modeled. Alternatively, a user may use unsupervised machine learning to assign regimes - this is +#' the process the `sovereign::regimes` function facilitates. +#' +#' **Structural shocks.** See Sims (1980) for details regarding the baseline vector-autoregression (VAR) model. The VAR may be augmented to become a structural VAR (SVAR) with one of three different structural identification strategies: +#' 1) short-term impact restrictions via Cholesky decomposition, see Christiano et al (1999) for details **(structure = 'short')** +#' 2) external instrument identification, i.e. a Proxy-SVAR strategy, see Mertens and Ravn (2013) for details **(structure = 'IV')** +#' 3) or a combination of short-term and IV identification via Lunsford (2015) **(structure = 'IV-short')** +#' +#' Note that including structure does not change the estimation of model coefficients or forecasts, but does change impulse response functions, forecast error variance decomposition, +#' and historical decompositions. Historical decompositions will not be available for models using the 'IV' structure. Additionally note that only one instrument may be used in this +#' estimation routine. +#' +#' @references +#' 1. Christiano, Lawrence, Martin Eichenbaum, and Charles Evans "[Monetary policy shocks: What have we learned and to what end?](https://www.sciencedirect.com/science/article/pii/S1574004899010058)" Handbook of Macroeconomics, Vol 1, Part A, 1999. +#' 2. 
Lunsford, Kurt "[Identifying Structural VARs with a Proxy Variable and a Test for a Weak Proxy](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2699452#)" 2015. +#' 3. Mertens, Karel and Morten Ravn "[The Dynamic Effects of Personal and Corporate Income Tax Changes in the United States](https://www.aeaweb.org/articles?id=10.1257/aer.103.4.1212)" 2013. +#' 4. Sims, Christopher "[Macroeconomics and Reality](https://www.jstor.org/stable/1912017)" 1980. +#' #' @export # regime-dependent VAR function @@ -710,7 +731,10 @@ RVAR = function( lag.max = NULL, # int: maximum number of lags to test in lag selection regime = NULL, # string: name or regime assignment vector in the design matrix (data) regime.method = 'rf', # string: regime assignment technique ('rf', 'kmeans', 'EM', 'BP') - regime.n = 2 # int: number of regimes to estimate (applies to kmeans and EM) + regime.n = 2, # int: number of regimes to estimate (applies to kmeans and EM) + structure = 'short', # string: type of structural identification strategy to use in model analysis (NULL, 'short', or 'IV') + instrument = NULL, # string: name of instrumental variable contained in the data matrix + instrumented = NULL # string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data ){ # function warnings @@ -727,6 +751,52 @@ RVAR = function( stop('lag.max must be an integer if IC-based lag selection is used') } } + if(!structure %in% c(NA, 'short', 'IV', 'IV-short')){ + stop("structure must be one of 'short, 'IV', 'IV-short', or NA") + } + if(!is.null(instrument)){ + if(!instrument %in% colnames(data)){ + stop("instrument must be the name of a variable found in data.") + } + } + if(!is.null(instrumented)){ + if(!instrumented %in% colnames(data)){ + stop("instrumented must be the name of a variable found in data.") + } + } + if(!is.matrix(data) & !is.data.frame(data)){ + stop('data must be a matrix or data.frame') + } + if(!is.numeric(p) | p %% 1 != 0){ + stop('p must be an integer') + } + if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ + stop('horizon must be a positive integer') + } + if(!freq %in% c('day','week','month','quarter','year')){ + stop("freq must be one of the following strings: 'day','week','month','quarter','year'") + } + + # cast as data frame if ts, xts, or zoo object + if(stats::is.ts(data) | xts::is.xts(data) | zoo::is.zoo(data)){ + data = data.frame(date = zoo::index(date), data) + } + + # set aside instruments + if(!is.null(instrument)){ + data.instrument = dplyr::select(data, date, dplyr::all_of(instrument)) + data = dplyr::select(data, -dplyr::all_of(instrument)) + }else{ + data.instrument = NULL + } + + # detect variable to be instrumented + # (defaults to first non-date column if missing) + if(is.null(instrumented) & structure %in% c('IV', 'IV-short')){ + var_to_instrument = colnames(dplyr::select(data, -date, -regime))[1] + }else{ + var_to_instrument = instrumented + } # create regimes if(is.null(regime)){ @@ -780,9 +850,6 @@ RVAR = function( min.ic = which.min(ic.scores) model = models[[min.ic]] - class(model) = 'RVAR' - return(model) - }else{ model = @@ -794,13 +861,15 @@ RVAR = function( freq = freq, type = type ) - - class(model) = 'RVAR' - return(model) - } + # add structure + model$structure = structure + model$instrument = data.instrument + model$instrumented = var_to_instrument -} + class(model) = 'RVAR' + return(model) +} diff --git a/R/var_fevd.R b/R/var_fevd.R index 009e708..2fbc74e 100644 --- a/R/var_fevd.R +++ b/R/var_fevd.R @@ 
-2,32 +2,35 @@ # Estimate forecast error variance # source code adapted from the MTS package #--------------------------------------------- -fevd = function(Phi, Sig, lag = 4) +fevd = function(Phi, Sig, lag = 4, structure = NA) { if (length(Phi) > 0) { if (!is.matrix(Phi)) Phi = as.matrix(Phi) } - if (!is.matrix(Sig)) - Sig = as.matrix(Sig) if (lag < 1) lag = 1 + p = 0 + if (length(Phi) > 0) { k = nrow(Phi) m = ncol(Phi) p = floor(m/k) } + q = 0 Si = diag(rep(1, k)) m = (lag + 1) * k m1 = (q + 1) * k + if (m > m1) { Si = cbind(Si, matrix(0, k, (m - m1))) } + if (p > 0) { for (i in 1:lag) { if (i <= p) { @@ -52,39 +55,47 @@ fevd = function(Phi, Sig, lag = 4) Si[, (kdx + 1):(kdx + k)] = tmp } } + orSi = NULL - m1 = chol(Sig) + m1 = Sig P = t(m1) orSi = P + for (i in 1:lag) { idx = i * k w1 = Si[, (idx + 1):(idx + k)] w2 = w1 %*% P orSi = cbind(orSi, w2) } + orSi2 = orSi^2 Ome = orSi2[, 1:k] wk = Ome + for (i in 1:lag) { idx = i * k wk = wk + orSi2[, (idx + 1):(idx + k)] Ome = cbind(Ome, wk) } + FeV = NULL OmeRa = Ome[, 1:k] FeV = cbind(FeV, apply(OmeRa, 1, sum)) OmeRa = OmeRa/FeV[, 1] + for (i in 1:lag) { idx = i * k wk = Ome[, (idx + 1):(idx + k)] FeV = cbind(FeV, apply(wk, 1, sum)) OmeRa = cbind(OmeRa, wk/FeV[, (i + 1)]) } + for (i in 1:(lag + 1)) { idx = (i - 1) * k Ratio = OmeRa[, (idx + 1):(idx + k)] } - FEVdec <- list(irf = Si, orthirf = orSi, Omega = Ome, OmegaR = OmeRa) + + FEVdec = list(irf = Si, orthirf = orSi, Omega = Ome, OmegaR = OmeRa) return(FEVdec) } @@ -94,6 +105,9 @@ fevd = function(Phi, Sig, lag = 4) #-------------------------------------------------------- #' Estimate forecast error variance decomposition #' +#' Estimate forecast error variance decomposition for VARs +#' with either short or 'IV-short' structural errors. +#' #' @param var VAR output #' @param horizon int: number of periods #' @param scale boolean: scale variable contribution as percent of total error @@ -133,7 +147,7 @@ fevd = function(Phi, Sig, lag = 4) #' #' # forecast error variance decomposition #' var.fevd = sovereign::var_fevd(var) -#' +#' #' # historical shock decomposition #' var.hd = sovereign::var_hd(var) #' @@ -151,6 +165,12 @@ var_fevd = function( if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ stop('horizon must be a positive integer') } + if(class(var) != 'VAR'){ + stop('var must be a VAR object') + } + if(!var$structure %in% c('short', 'IV-short')){ + stop('FEVD method is only available for VARs with short or IV-short structural errors') + } # function variables forecast.date = y = shock = error = NULL @@ -162,12 +182,14 @@ var_fevd = function( regressors = colnames(dplyr::select(data, -date)) # error covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(residuals, -date, -forecast.date))) + B = solve_B(var) + B = as.matrix(B) # forecast error variance decomposition - errors = fevd(Phi = dplyr::select(coef, -y, -dplyr::contains('cosnt'), -dplyr::contains('trend')), - Sig = cov.matrix, - lag = horizon) + errors = fevd(Phi = dplyr::select(coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')), + Sig = B, + lag = horizon, + structure = var$structure) # reorganize results response = data.frame(t(errors$OmegaR)) @@ -178,7 +200,15 @@ var_fevd = function( # cast to long-form response = response %>% - tidyr::pivot_longer(cols = regressors, names_to = 'response', values_to = 'error') + tidyr::pivot_longer(!c('shock','horizon'), names_to = 'response', values_to = 'error') + + + if(var$structure %in% c('IV-short')){ + labels = c(var$instrumented, 
regressors[!regressors %in% var$instrumented]) + labels = rep(labels, length(labels) * (horizon+1)) + response$response = labels + response$shock = labels[order(labels)] + } # scale responses if(scale == TRUE){ @@ -193,6 +223,9 @@ var_fevd = function( #' Estimate regime-dependent forecast error variance decomposition #' +#' Estimate forecast error variance decomposition for RVARs +#' with either short or 'IV-short' structural errors. +#' #' @param rvar RVAR output #' @param horizon int: number of periods #' @param scale boolean: scale variable contribution as percent of total error @@ -235,7 +268,7 @@ var_fevd = function( #' #' # forecast error variance decomposition #' rvar.fevd = sovereign::rvar_fevd(rvar) -#' +#' #' # historical shock decomposition #' rvar.hd = sovereign::rvar_hd(rvar) #' @@ -254,6 +287,12 @@ rvar_fevd = function( if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ stop('horizon must be a positive integer') } + if(class(rvar) != 'RVAR'){ + stop('rvar must be a RVAR object') + } + if(!rvar$structure %in% c('short', 'IV-short')){ + stop('FEVD method is only available for VARs with short or IV-short structural errors') + } # function variables forecast.date = y = shock = error = model.regime = NULL @@ -274,16 +313,38 @@ rvar_fevd = function( # set regime specific data coef = rvar$model[[paste0('regime_',regime.val)]]$coef + residuals = rvar$residuals[[1]] %>% dplyr::filter(model.regime == regime.val) - # error covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(residuals, -date, -model.regime, -forecast.date))) + # instrument + if(!is.null(rvar$instrument)){ + instrument = dplyr::inner_join(rvar$instrument, dplyr::select(residuals, date), by = 'date') + }else{ + instrument = NULL + } + + # regime-specific model + model = rvar$model[[paste0('regime_',regime.val)]] + + # structural error variance-covariance matrix + B = + solve_B( + var = list( + model = model, + residuals = residuals %>% + dplyr::select(-model.regime), + structure = rvar$structure, + instrument = instrument, + instrumented = rvar$instrumented + ) + ) # forecast error variance decomposition - errors = fevd(Phi = dplyr::select(coef, -y, -dplyr::contains('cosnt'), -dplyr::contains('trend')), - Sig = cov.matrix, - lag = horizon) + errors = fevd(Phi = dplyr::select(coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')), + Sig = B, + lag = horizon, + structure = rvar$structure) # reorganize results response = data.frame(t(errors$OmegaR)) @@ -294,7 +355,15 @@ rvar_fevd = function( # cast to long-form response = response %>% - tidyr::pivot_longer(cols = regressors, names_to = 'response', values_to = 'error') + tidyr::pivot_longer(!c('shock','horizon'), names_to = 'response', values_to = 'error') + + # correct labels for IV-short + if(rvar$structure %in% c('IV-short')){ + labels = c(rvar$instrumented, regressors[!regressors %in% rvar$instrumented]) + labels = rep(labels, length(labels) * (horizon+1)) + response$response = labels + response$shock = labels[order(labels)] + } # scale responses if(scale == TRUE){ diff --git a/R/var_hd.R b/R/var_hd.R index 8be2782..979e2fb 100644 --- a/R/var_hd.R +++ b/R/var_hd.R @@ -4,10 +4,9 @@ #' Estimate historical decomposition #' -#' Estimate historical decomposition with contemporaneous impact -#' restrictions via Cholesky decomposition - taking the variable ordering -#' present in the VAR object. -#' +#' Estimate historical decomposition for VARs with +#' either short or 'IV-short' structural errors. 
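The FEVD routines above, and the historical-decomposition routines that follow, now source their structural impact matrix from solve_B() rather than building a Cholesky factor in place. For readers unfamiliar with the short-run identification this implies, here is a minimal, self-contained sketch; the simulated residual matrix U and variable names y1-y3 are hypothetical stand-ins for a fitted VAR's residuals.

```r
# short-run ('short') identification in isolation:
# lower-triangular B from the Cholesky factor of the reduced-form covariance
set.seed(1)
U = matrix(rnorm(300), ncol = 3)          # stand-in for var$residuals (date columns dropped)
colnames(U) = c('y1', 'y2', 'y3')

sigma = stats::var(stats::na.omit(U))     # reduced-form error covariance
B     = t(chol(sigma))                    # structural impact matrix (lower triangular)
e     = t(solve(B) %*% t(U))              # implied orthogonal structural shocks

max(abs(B %*% t(B) - sigma))              # ~0: B exactly reproduces the covariance
```

The decompositions then attribute forecast error variance and observed movements to the columns of e, which is why both methods insist on a 'short' or 'IV-short' structure.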
+#' #' @param var VAR output #' #' @return long-from data.frame @@ -57,6 +56,15 @@ var_hd = function( var # VAR object ){ + + # function warnings + if(class(var) != 'VAR'){ + stop('var must be a VAR object') + } + if(!var$structure %in% c('short', 'IV-short')){ + stop('HD method is only available for VARs with short or IV-short structural errors') + } + # set function variables model.regime = target = forecast.date = NULL @@ -73,8 +81,10 @@ var_hd = function( # reduced form error variance-covariance matrix sigma = stats::var(stats::na.omit(residuals)) - # structural error variance-covariance matrix - B = chol(sigma) + # estimate error-covariance matrix or structural impact matrix + B = solve_B(var) + B = as.matrix(B) + # coefficient matrix # (already written in psuedo-companion form) @@ -169,10 +179,8 @@ var_hd = function( #' Estimate regime-dependent historical decomposition #' -#' Estimate historical decomposition with contemporaneous impact -#' restrictions via Cholesky decomposition - taking the variable ordering -#' present in the RVAR object. Estimate one response function per unique -#' state defined by the regime-dependent VAR. +#' Estimate historical decomposition for RVARs with +#' either short or 'IV-short' structural errors. #' #' @param rvar RVAR output #' @@ -226,6 +234,13 @@ rvar_hd = function( rvar # threshold VAR output ){ + # function warnings + if(class(rvar) != 'RVAR'){ + stop('rvar must be a RVAR object') + } + if(!rvar$structure %in% c('short', 'IV-short')){ + stop('HD method is only available for VARs with short or IV-short structural errors') + } # function variables model.regime = target = forecast.date = NULL @@ -243,18 +258,37 @@ rvar_hd = function( purrr::map(.f = function(regime.val){ # recover model dynamics ----------------------------- - # (for now, only use cholesky decomposition) # regime-dependent reduced form residuals residuals = rvar$residuals$H_1 %>% - # dplyr::filter(model.regime == regime.val) %>% dplyr::select(-date, -forecast.date, -model.regime) - # reduced form error variance-covariance matrix - sigma = stats::var(stats::na.omit(residuals)) + is = rvar$data %>% + dplyr::inner_join(dplyr::select(rvar$residuals$H_1, date), by = 'date') %>% + dplyr::select(-regime) + + # instrument + if(!is.null(rvar$instrument)){ + instrument = dplyr::inner_join(rvar$instrument, dplyr::select(is, date), by = 'date') + }else{ + instrument = NULL + } + + # regime-specific model + model = rvar$model[[paste0('regime_',regime.val)]] # structural error variance-covariance matrix - B = chol(sigma) + B = + solve_B( + var = list( + model = model, + residuals = dplyr::inner_join(rvar$residuals$H_1, dplyr::select(is, date), by = 'date') %>% + dplyr::select(-model.regime), + structure = rvar$structure, + instrument = instrument, + instrumented = rvar$instrumented + ) + ) # coefficient matrix # (already written in psuedo-companion form) diff --git a/R/var_irf.R b/R/var_irf.R index b88a446..09960d4 100644 --- a/R/var_irf.R +++ b/R/var_irf.R @@ -1,213 +1,8 @@ - -#------------------------------------------ -# Function to estimate the structural -# error impact matrix -# -# See Cesa-Bianchi's VAR-Toolbox for -# MATLAB implementation -#------------------------------------------ -solve_B = function(var, report_iv = TRUE){ - - if(is.null(var$structure) == TRUE){ - - # retrieve reduced-form residuals - data.residuals = var$residuals[[1]] - - # reduced form variance-covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(data.residuals, -date, -forecast.date))) - - B = 
cov.matrix - - return(B) - - }else if(var$structure == 'short'){ - - # retrieve reduced-form residuals - data.residuals = var$residuals[[1]] - - # reduced form variance-covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(data.residuals, -date, -forecast.date))) - - # take cholesky decomposition - B = t(chol(cov.matrix)) - - return(B) - - }else if(var$structure == 'IV'){ - - # retrieve reduced-form residuals - data.residuals = var$residuals[[1]] - covariates = colnames(dplyr::select(data.residuals, -date, -forecast.date)) - col_to_instrument = var$instrumented - - # retrieve instrument - data.instrument = var$instrument - instrument = colnames(dplyr::select(data.instrument, -date)) - - # combine data - data = - dplyr::inner_join(data.residuals, data.instrument, by = 'date') %>% - na.omit() - - # first stage least squares - model.first_stage = - lm(col_to_instrument ~., - data = data %>% - select(col_to_instrument = dplyr::all_of(col_to_instrument), all_of(instrument)) %>% - na.omit()) - p_hat = model.first_stage$fitted.values - - if(report_iv == TRUE){ - print(summary(model.first_stage)) - } - - # second stage least squares - # to estimate first column of B - # (automatically scales first entry to 1) - instrumented_shocks = covariates %>% - purrr::map_df(.f = function(X){ - - model.second_stage = lm(data[,X] ~ p_hat) - second_stage_beta = model.second_stage$coefficients[2] - return(second_stage_beta) - - }) %>% - as.vector() - - # scale size of the shock - # see Gertler and Karadi (2015) for background - # see Cesa-Bianchi's VAR-Toolbox for MATLAB implementation - X.demean = - select(data, -date, -forecast.date, -dplyr::all_of(instrument)) %>% - mutate_all(function(X){return(X - mean(X, na.rm = T))}) %>% - as.matrix() - - sigma_b = 1/(nrow(data) - ncol(var$model$coef) + 1) * t(X.demean) %*% X.demean - - s21s11 = instrumented_shocks[2:length(covariates),] %>% as.matrix() - S11 = sigma_b[1,1] %>% as.numeric() - S21 = sigma_b[2:nrow(sigma_b),1] %>%as.vector() - S22 = sigma_b[2:nrow(sigma_b),2:ncol(sigma_b)] - - Q = (s21s11 * S11) %*% t(s21s11) - (S21 %*% t(s21s11) + as.matrix(s21s11) %*% t(S21)) + S22 - sp = sqrt( S11 - t(S21 - as.matrix(s21s11) %*% S11) %*% as.matrix(solve(Q) %*% (S21 - s21s11 * S11)) ) - - scaled_instrumented_shocks = instrumented_shocks * as.numeric(sp) - - # prepare B matrix - B = matrix(0, ncol = (length(covariates) - 1), nrow = length(covariates)) - B = cbind(scaled_instrumented_shocks, B) - - return(B) - - }else if(var$structure == 'IV-short'){ - - # align instrument and residuals - valid_dates = - dplyr::inner_join( - dplyr::select(na.omit(var$residuals[[1]]), date), - dplyr::select(na.omit(var$instrument), date), - by = 'date' - ) - - # set residuals matrix - residuals = var$residuals[[1]] %>% - dplyr::inner_join(valid_dates, by = 'date') %>% - dplyr::select(-date, -forecast.date) %>% - as.matrix() - - residuals = -1*residuals - - # set instrument - instrument = var$instrument %>% - data.frame() %>% - dplyr::inner_join(valid_dates, by = 'date') %>% - dplyr::select(-date) - - instrument.mean = instrument %>% - dplyr::mutate_all(mean, na.rm = T) - - # number of observations - n.obs = nrow(var$data) - - # solve for IV implied impact - pi = solve(n.obs^(-1) * t(residuals) %*% residuals) %*% - (n.obs^(-1) * t(residuals) %*% as.matrix(instrument - instrument.mean)) - - phi_sq = - (n.obs^(-1) * t(instrument - instrument.mean) %*% residuals) %*% - solve( n.obs^(-1) * t(residuals) %*% residuals ) %*% - ( n.obs^(-1) * t(residuals) %*% as.matrix(instrument - 
instrument.mean) ) - - B1 = - n.obs^(-1) * - ( t(residuals) %*% as.matrix(instrument - instrument.mean) ) %*% - (phi_sq)^(-0.5) - - B = matrix(ncol = ncol(residuals), nrow = ncol(residuals)) - B[,1:ncol(instrument)] = B1 - rownames(B) = colnames(B) = colnames(residuals) - - # solve for orthogonalized structural shock - model.first_stage = lm(instrument[,1] ~ residuals) - orthogonal_instrument = instrument - model.first_stage$residuals - orthogonal_instrument = orthogonal_instrument[,] / sd(orthogonal_instrument[,], na.rm = T) - - shock_matrix = matrix(nrow = nrow(residuals), ncol = ncol(residuals)) - shock_matrix[,1] = orthogonal_instrument - - # reduced form variance-covariance matrix - sigma = stats::var(residuals) - - # solve additional entries - # with a lower triangular restriction - order_sequence = c(1:ncol(residuals)) - - for(i in order_sequence){ - - Y = residuals[,i] - X = shock_matrix[,c(1:i)] - model.second_stage = lm(Y~X) - - if(i != tail(order_sequence,1)){ - - B[i,] = - c( model.second_stage$coef[-1], - sd(model.second_stage$residuals), - rep(0, length(order_sequence) - ncol(instrument) - i) ) - - shock_matrix[,i+1] = model.second_stage$residuals / sd(model.second_stage$residuals) - - }else{ - - B[i,] = model.second_stage$coef[-1] - - } - - } - - # make diagonal entries positive - shock_ordering = data.frame(residuals) %>% - select(var$instrumented, dplyr::everything()) %>% - colnames() - B.sign = diag(sign(diag(B[shock_ordering,]))) - B = B %*% B.sign - - return(B) - - }else{ - - stop('structure must be set to either NULL, "short", or "IV".') - - } - -} - #------------------------------------------ # Function to estimate impulse responses # source code adapted from the MTS package #------------------------------------------ -IRF = function (Phi, B, lag, structure = NULL){ +IRF_solve = function(Phi, B, lag, structure = NA){ # cast data as matrices if (!is.matrix(Phi)) @@ -263,7 +58,7 @@ IRF = function (Phi, B, lag, structure = NULL){ irf = Si - if (!is.null(structure)) { + if (!is.na(structure)) { P = B wk1 = cbind(wk1, c(P)) @@ -292,15 +87,15 @@ IRF = function (Phi, B, lag, structure = NULL){ #' Estimate impulse response functions #' -#' Estimate impulse responses with contemporaneous impact restrictions via Cholesky decomposition - -#' taking the variable ordering present in the VAR object. 
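The external-instrument ('IV') identification used by solve_B() recovers only the column of B associated with the instrumented shock, via two least-squares stages: the instrumented variable's reduced-form residual is projected on the proxy, and each residual series is then regressed on those fitted values. Below is a self-contained sketch with simulated data; the names y1-y3 and proxy z are hypothetical, and the shock-size scaling from Gertler and Karadi (2015) is omitted for brevity.

```r
# proxy-SVAR first column of B via two-stage least squares (illustrative only)
set.seed(1)
n  = 200
e1 = rnorm(n)                                     # structural shock the proxy is informative about
z  = e1 + rnorm(n, sd = 0.5)                      # external instrument, correlated with e1 only
U  = cbind(y1 =  e1       + rnorm(n, sd = 0.2),   # stand-ins for reduced-form residuals
           y2 =  0.5 * e1 + rnorm(n),
           y3 = -0.3 * e1 + rnorm(n))

# first stage: instrumented residual on the proxy
p_hat = fitted(lm(U[, 'y1'] ~ z))

# second stage: slope of every residual series on the fitted values;
# the entry for the instrumented variable is 1 by construction
b1 = apply(U, 2, function(u) coef(lm(u ~ p_hat))[2])
round(b1, 2)                                      # approximately c(1, 0.5, -0.3)
```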
-#' -#' @param var VAR output -#' @param horizon int: number of periods -#' @param bootstraps.num int: number of bootstraps -#' @param CI numeric vector: c(lower ci bound, upper ci bound) +#' @param var VAR output +#' @param horizon int: number of periods +#' @param CI numeric vector: c(lower ci bound, upper ci bound) +#' @param bootstrap.type string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used +#' @param bootstrap.num int: number of bootstraps +#' @param bootstrap.parallel boolean: create IRF draws in parallel +#' @param bootstrap.cores int: number of cores to use in parallel processing; -1 detects and uses half the available cores #' -#' @return list object with elements `irfs`, `ci.lower`, and `ci.upper`; all elements are long-form data.frames +#' @return data.frame with columns `target`, `shock`, `horizon`, `response.lower`, `response`, `response.upper` #' #' @seealso [VAR()] #' @seealso [var_irf()] @@ -330,20 +125,19 @@ IRF = function (Phi, B, lag, structure = NULL){ #' lag.ic = 'BIC', #' lag.max = 4) #' -#' # impulse response functions -#' var.irf = sovereign::var_irf(var) +#' # impulse response functions +#' var.irf = sovereign::var_irf(var) #' -#' # forecast error variance decomposition -#' var.fevd = sovereign::var_fevd(var) +#' # forecast error variance decomposition +#' var.fevd = sovereign::var_fevd(var) #' -#' # historical shock decomposition -#' var.hd = sovereign::var_hd(var) +#' # historical shock decomposition +#' var.hd = sovereign::var_hd(var) #' #' } #' #' @export - var_irf = function( var, # VAR output horizon = 10, # int: number of periods @@ -354,9 +148,6 @@ var_irf = function( bootstrap.cores = -1 # int: number of cores to use in parallel processing; -1 detects and uses half the available cores ){ - # NOTES: scaled wild is used for IV, consistent with the proxy SVAR literature, - # while standard resample is used for others - # function warnings if(!is.numeric(bootstrap.num) | bootstrap.num %% 1 != 0){ stop('bootstrap.num must be an integer') @@ -369,7 +160,7 @@ var_irf = function( } # function variables - y = forecast.date = shock = target = response = response.lower = response.upper = response.med = response.adjust = NULL + y = forecast.date = response.mean = shock = target = response = response.lower = response.upper = response.med = response.adjust = NULL # set data coef = var$model$coef @@ -379,15 +170,11 @@ var_irf = function( # set model variables p = var$model$p freq = var$model$freq - structure = var$structure + type = var$model$type - if('const' %in% colnames(coef) & 'trend' %in% colnames(coef)){ - type = 'both' - }else if('const' %in% colnames(coef)){ - type = 'const' - }else if('trend' %in% colnames(coef)){ - type = 'trend' - } + structure = var$structure + instrument = var$instrument + instrumented = var$instrumented regressors = colnames(dplyr::select(data, -date)) if(!is.null(var$regime)){regressors = regressors[regressors != var$regime$regime]} @@ -404,27 +191,45 @@ var_irf = function( B = as.matrix(B) # estimate IRFs - phi = dplyr::select(coef, -y, -dplyr::contains('cosnt'), -dplyr::contains('trend')) + phi = dplyr::select(coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')) - irf = IRF(Phi = phi, - B = B, - lag = horizon, - structure = structure) + irf = + IRF_solve( + Phi = phi, + B = B, + lag = horizon, + structure = structure + ) # reorganize results irf = data.frame(t(irf)) - if(!is.null(structure)){ - if(structure == 'IV-short'){ - shocks = 
c(var$instrumented, regressors[!regressors %in% var$instrumented]) - irf$shock = rep(shocks, horizon + 1) - } - }else{ + # assign shocks + if(is.na(structure)){ + irf$shock = rep(regressors, horizon + 1) + irf$horizon = sort(rep(c(0:horizon), length(regressors))) + + }else if(structure == 'IV-short'){ + + shocks = c(instrumented, regressors[!regressors %in% instrumented]) + irf$shock = rep(shocks, horizon + 1) + irf$horizon = sort(rep(c(0:horizon), length(regressors))) + + }else if(structure == 'IV'){ + + irf = irf[(c(1:(horizon))*length(regressors)-length(regressors)+1),] + irf$shock = instrumented + irf$horizon = c(0:(horizon-1)) + + }else if(structure == 'short'){ + + irf$shock = rep(regressors, horizon + 1) + irf$horizon = sort(rep(c(0:horizon), length(regressors))) + } - irf$horizon = sort(rep(c(0:horizon), length(regressors))) - irf = dplyr::arrange(irf, shock, horizon) + # observation identifiers rownames(irf) = NULL colnames(irf) = c(regressors, 'shock', 'horizon') @@ -448,8 +253,7 @@ var_irf = function( # bootstrap residuals if(bootstrap.type == 'wild' | - bootstrap.type == 'auto' & structure == 'IV' | - bootstrap.type == 'auto' & structure == 'IV-short'){ + bootstrap.type == 'auto' & structure %in% c('IV', 'IV-short')){ # 'wild' bootstrap technique for simple distribution # using observed scaled residuals with random sign flip. @@ -458,11 +262,18 @@ var_irf = function( r = sample(c(-1,1), size = nrow(U), replace = T) U = sweep(U, MARGIN = 1, r, `*`) - }else if(bootstrap.type == 'standard' | bootstrap.type == 'auto' & structure != 'IV'){ + # bootstrap instrument + if(!is.null(instrument)){ + instrument.bag = instrument + instrument.bag[,-1] = instrument.bag[,-1] * r + } + + }else if(bootstrap.type == 'standard' | + bootstrap.type == 'auto' & !structure %in% c('IV', 'IV-short')){ - # standard bootstrap technique a al Lutkepohl (2005) + # standard resample bootstrap technique a al Lutkepohl (2005) # draw residuals with replacement - U = na.omit(residuals) + U = stats::na.omit(residuals) U = U[sample(c(1:nrow(U)), size = nrow(residuals), replace = TRUE),] @@ -479,7 +290,7 @@ var_irf = function( for(i in (p+1):nrow(var$data)){ X = Y[(i-p):(i-1),] - X = embed(as.matrix(X), dimension = p) + X = stats::embed(as.matrix(X), dimension = p) X = data.frame(X) if(type %in% c('const', 'both')){X$const = 1} @@ -501,18 +312,12 @@ var_irf = function( p = p, horizon = 1, freq = freq, - type = type, - structure = structure) - - # bootstrap instrument - if(!is.null(structure)){ - if(structure == 'IV' | structure == 'IV-short'){ - var.bag$instrument = var$instrument - var.bag$instrument[,-1] = sweep(var.bag$instrument[,-1], MARGIN = 1, r, `*`) + type = type + ) - var.bag$instrumented = var$instrumented - } - } + var.bag$structure = structure + var.bag$instrumented = instrumented + if(!is.null(instrument)){var.bag$instrument = instrument.bag} # estimate error-covariance matrix or structural impact matrix B.bag = solve_B(var.bag, report_iv = FALSE) @@ -521,60 +326,64 @@ var_irf = function( coef.bag = dplyr::select(var.bag$model$coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')) # estimate IRFs - irf.bag = IRF(Phi = coef.bag, - B = B.bag, - lag = horizon, - structure = structure) + irf.bag = + IRF_solve( + Phi = coef.bag, + B = B.bag, + lag = horizon, + structure = structure + ) # reorganize results irf.bag = data.frame(t(irf.bag)) - if(!is.null(structure)){ - if(structure == 'IV-short'){ - shocks = c(var$instrumented, regressors[!regressors %in% var$instrumented]) - irf.bag$shock = 
rep(shocks, horizon + 1) - } - }else{ - irf.bag$shock = rep(regressors, horizon + 1) + # assign shocks + if(is.na(structure)){ + + irf.bag$shock = rep(regressors, horizon + 1) + irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) + + }else if(structure == 'IV-short'){ + + shocks = c(var$instrumented, regressors[!regressors %in% var$instrumented]) + irf.bag$shock = rep(shocks, horizon + 1) + irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) + + }else if(structure == 'IV'){ + + irf.bag = irf.bag[(c(1:(horizon))*length(regressors)-length(regressors)+1),] + irf.bag$shock = var$instrumented + irf.bag$horizon = c(0:(horizon-1)) + + }else if(structure == 'short'){ + + irf.bag$shock = rep(regressors, horizon + 1) + irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) + } - irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) - irf.bag = dplyr::arrange(irf.bag, shock, horizon) + # observation identifiers rownames(irf.bag) = NULL colnames(irf.bag) = c(regressors, 'shock', 'horizon') return(irf.bag) - }) - - # 3. calculate confidence intervals + }, + .options = furrr::furrr_options(seed = TRUE)) + # 2. calculate confidence intervals # collapse to data.frame ci = bagged.irf %>% purrr::reduce(dplyr::bind_rows) - # remove unused shocks in the case of IV - if(!is.null(var$structure)){ - if(var$structure == 'IV'){ - ci = ci %>% - dplyr::filter(shock == regressors[1]) - } - } - - # correct shock names in the case of IV-short - if(!is.null(var$structure)){ - if(var$structure == 'IV-short'){ - - } - } - # estimate bands ci = ci %>% tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response') %>% - group_by(shock, horizon, target) %>% - summarize(response.lower = quantile(response, probs = p.lower, na.rm = T), - response.upper = quantile(response, probs = p.upper, na.rm = T), - response.mean = mean(response, na.rm = T) ) + dplyr::group_by(shock, horizon, target) %>% + dplyr::summarize( + response.lower = stats::quantile(response, probs = p.lower, na.rm = T), + response.upper = stats::quantile(response, probs = p.upper, na.rm = T), + response.mean = mean(response, na.rm = T) ) # combine point estimates and CI irf = irf %>% @@ -582,7 +391,7 @@ var_irf = function( dplyr::arrange(shock, horizon) irf = - full_join( + dplyr::full_join( irf, ci, by = c('shock', 'target', 'horizon') ) @@ -591,29 +400,29 @@ var_irf = function( # (bias can be introduced in the bootstrapping if residuals are not actually mean zero) irf = irf %>% dplyr::mutate( - response.adjust = response - response.mean, - response.lower = response.lower + response.adjust, - response.upper = response.upper + response.adjust + response.adjust = response - response.mean, + response.lower = response.lower + response.adjust, + response.upper = response.upper + response.adjust ) %>% dplyr::select(target, shock, horizon, response.lower, response, response.upper) %>% dplyr::arrange(target, shock, horizon) ### return output -------------- return(irf) + } #' Estimate regime-dependent impulse response functions #' -#' Estimate impulse responses with contemporaneous impact restrictions via Cholesky decomposition - -#' taking the variable ordering present in the VAR object. Estimate one response function per unique -#' state defined by the regime-dependent VAR. 
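Both var_irf() and rvar_irf() re-centre the bootstrap bands on the point estimate before returning, as noted in the 'adjust for bias' step. A toy illustration of that adjustment for a single (target, shock, horizon) cell, using made-up numbers:

```r
# re-centre the percentile band on the point estimate (illustrative numbers)
response       = 0.80                                       # point-estimate IRF
draws          = c(0.55, 0.70, 0.75, 0.90, 1.05)            # bootstrapped responses for the same cell
response.lower = unname(stats::quantile(draws, probs = 0.10))
response.upper = unname(stats::quantile(draws, probs = 0.90))
response.mean  = mean(draws)

response.adjust = response - response.mean                  # shift = point estimate minus bootstrap mean
c(lower    = response.lower + response.adjust,
  response = response,
  upper    = response.upper + response.adjust)
```

The shift removes any level bias introduced by resampled residuals that are not exactly mean zero, while leaving the width of the interval untouched.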
+#' @param rvar RVAR output +#' @param horizon int: number of periods +#' @param CI numeric vector: c(lower ci bound, upper ci bound) +#' @param bootstrap.type string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used +#' @param bootstrap.num int: number of bootstraps +#' @param bootstrap.parallel boolean: create IRF draws in parallel +#' @param bootstrap.cores int: number of cores to use in parallel processing; -1 detects and uses half the available cores #' -#' @param rvar RVAR output -#' @param horizon int: number of periods -#' @param bootstraps.num int: number of bootstraps -#' @param CI numeric vector: c(lower ci bound, upper ci bound) -#' -#' @return list of lists, each regime returns its own list with elements `irfs`, `ci.lower`, and `ci.upper`; all elements are long-form data.frames +#' @return list of regimes, each with data.frame of columns `target`, `shock`, `horizon`, `response.lower`, `response`, `response.upper` #' #' @seealso [VAR()] #' @seealso [var_irf()] @@ -644,29 +453,32 @@ var_irf = function( #' lag.ic = 'BIC', #' lag.max = 4) #' -#' # impulse response functions -#' rvar.irf = sovereign::rvar_irf(rvar) +#' # impulse response functions +#' rvar.irf = sovereign::rvar_irf(rvar) #' -#' # forecast error variance decomposition -#' rvar.fevd = sovereign::rvar_fevd(rvar) +#' # forecast error variance decomposition +#' rvar.fevd = sovereign::rvar_fevd(rvar) #' -#' # historical shock decomposition -#' rvar.hd = sovereign::rvar_hd(rvar) +#' # historical shock decomposition +#' rvar.hd = sovereign::rvar_hd(rvar) #' #' } #' #' @export rvar_irf = function( - rvar, # threshold VAR output - horizon = 10, # int: number of periods - bootstraps.num = 100, # int: number of bootstraps - CI = c(0.1, 0.9) # numeric vector: c(lower ci bound, upper ci bound) + rvar, # threshold VAR output + horizon = 10, # int: number of periods + CI = c(0.1, 0.9), # numeric vector: c(lower ci bound, upper ci bound) + bootstrap.type = 'auto', # string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used + bootstrap.num = 100, # int: number of bootstraps + bootstrap.parallel = FALSE, # boolean: create IRF draws in parallel + bootstrap.cores = -1 # int: number of cores to use in parallel processing; -1 detects and uses half the available cores ){ # function warnings - if(!is.numeric(bootstraps.num) | bootstraps.num %% 1 != 0){ - stop('bootstraps.num must be an integer') + if(!is.numeric(bootstrap.num) | bootstrap.num %% 1 != 0){ + stop('bootstrap.num must be an integer') } if(!is.numeric(CI) | length(CI) != 2 | min(CI) < 0 | max(CI) > 1 | is.na(sum(CI))){ stop('CI must be a two element numeric vector bound [0,1]') @@ -676,25 +488,18 @@ rvar_irf = function( } # function variables - y = forecast.date = shock = target = response = response.lower = response.upper = model.regime = response.med = response.adjust = NULL + y = forecast.date = response.mean = shock = target = response = response.lower = response.upper = model.regime = response.med = response.adjust = NULL # set data data = rvar$data p = rvar$model[[1]]$p freq = rvar$model[[1]]$freq + type = rvar$model[[1]]$type + regime = rvar$regime regressors = colnames(dplyr::select(data, -date, -regime)) - residuals = rvar$residuals[[1]] - - if('const' %in% colnames(rvar$model[[1]]$coef) & - 'trend' %in% colnames(rvar$model[[1]]$coef)){ - type = 'both' - }else if('const' %in% 
colnames(rvar$model[[1]]$coef)){ - type = 'const' - }else if('trend' %in% colnames(rvar$model[[1]]$coef)){ - type = 'trend' - } + structure = rvar$structure p.lower = CI[1] p.upper = CI[2] @@ -708,135 +513,247 @@ rvar_irf = function( # set regime specific data coef = rvar$model[[paste0('regime_',regime.val)]]$coef - residuals = residuals %>% - dplyr::filter(model.regime == regime.val) + + residuals = rvar$residuals[[1]] %>% + dplyr::filter(model.regime == regime.val) %>% + dplyr::select(-model.regime) + is = data %>% dplyr::inner_join(dplyr::select(residuals, date), by = 'date') %>% - dplyr::rename(regime = regime) - residuals = residuals %>% - dplyr::select(-date, -model.regime, -forecast.date) + dplyr::select(-regime) + + if(!is.null(rvar$instrument)){ + instrument = dplyr::inner_join(rvar$instrument, dplyr::select(is, date), by = 'date') + }else{ + instrument = NULL + } + + model = rvar$model[[paste0('regime_',regime.val)]] ### calculate impulse responses -------------- - # estimate error-covariance matrix - cov.matrix = stats::var(stats::na.omit(residuals)) + + # estimate error-covariance matrix or structural impact matrix + B = + solve_B( + var = list( + model = model, + residuals = residuals, + structure = rvar$structure, + instrument = instrument, + instrumented = rvar$instrumented + ) + ) # estimate IRFs - irf = IRF(Phi = dplyr::select(coef, -y, -dplyr::contains('cosnt'), -dplyr::contains('trend')), - Sig = cov.matrix, - lag = horizon) + phi = dplyr::select(coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')) + + irf = IRF_solve( + Phi = phi, + B = B, + lag = horizon, + structure = rvar$structure + ) # reorganize results - irf = data.frame(t(irf$orthirf)) - irf$shock = rep(regressors, horizon + 1) - irf$horizon = sort(rep(c(0:horizon), length(regressors))) - irf = irf %>% dplyr::arrange(shock, horizon) + irf = data.frame(t(irf)) + + # assign shocks + if(is.na(rvar$structure)){ + + irf$shock = rep(regressors, horizon + 1) + irf$horizon = sort(rep(c(0:horizon), length(regressors))) + + }else if(rvar$structure == 'IV-short'){ + + shocks = c(rvar$instrumented, regressors[!regressors %in% rvar$instrumented]) + irf$shock = rep(shocks, horizon + 1) + irf$horizon = sort(rep(c(0:horizon), length(regressors))) + + }else if(rvar$structure == 'IV'){ + + irf = irf[(c(1:(horizon))*length(regressors)-length(regressors)+1),] + irf$shock = rvar$instrumented + irf$horizon =c(0:(horizon-1)) + + }else if(rvar$structure == 'short'){ + + irf$shock = rep(regressors, horizon + 1) + irf$horizon = sort(rep(c(0:horizon), length(regressors))) + + } + + # observation identifiers rownames(irf) = NULL + colnames(irf) = c(regressors, 'shock', 'horizon') ### bootstrap irf standard errors -------------- - # see Lutkepohl (2005) + + # 0. set up parallel back-end + if(bootstrap.parallel == TRUE){ + if(bootstrap.cores == -1){ + n.cores = floor(future::availableCores() / 2) + }else{ + n.cores = bootstrap.cores + } + future::plan(future::multisession, workers = n.cores) + }else{ + future::plan(future::sequential) + } # 1. 
create bootstrap time series - bagged.series = as.list(1:bootstraps.num) %>% - purrr::map(.f = function(count){ - - # draw bootstrapped residuals - U = residuals[sample(c(1:nrow(residuals)), - size = nrow(residuals), - replace = TRUE),] - U = U %>% - dplyr::mutate_all(function(X){return(X-mean(X, na.rm = T))}) - - # create lags - X = is %>% - dplyr::select(-regime) %>% - n.lag(lags = p) %>% - dplyr::select(dplyr::contains('.l')) %>% - dplyr::slice(p:nrow(U)) - - if('const' %in% type | 'both' %in% type){X$const = 1} - if('trend' %in% type | 'both' %in% type){X$trend = c(1:nrow(X))} - - # estimate time series - Y = as.matrix(X) %*% as.matrix(t(coef[,-1])) - Y = Y + U - colnames(Y) = regressors - Y = data.frame(Y, date = is$date[1:nrow(Y)]) - - # return synthetic observations - return(Y) - - }) - - # 2. create bootstrapped residuals - bagged.irf = bagged.series %>% - purrr::map(.f = function(synth){ - - # estimate VAR with bootstrapped series + bagged.irf = as.list(1:bootstrap.num) %>% + furrr::future_map(.f = function(count){ + + # bootstrap residuals + if(bootstrap.type == 'wild' | + bootstrap.type == 'auto' & structure %in% c('IV', 'IV-short')){ + + # 'wild' bootstrap technique for simple distribution + # using observed scaled residuals with random sign flip. + # See the Rademacher distribution. + U = residuals[,!colnames(residuals) %in% c('date','forecast.date','model.regime')] + r = sample(c(-1,1), size = nrow(U), replace = T) + U = sweep(U, MARGIN = 1, r, `*`) + + # bootstrap instrument + if(!is.null(instrument)){ + instrument.bag = instrument + instrument.bag[,-1] = instrument.bag[,-1] * r + } + + }else if(bootstrap.type == 'standard' | + bootstrap.type == 'auto' & !structure %in% c('IV', 'IV-short')){ + + # standard bootstrap technique a al Lutkepohl (2005) + # draw residuals with replacement + U = stats::na.omit(residuals) + U = U[sample(c(1:nrow(U)), + size = nrow(residuals), + replace = TRUE),] + U = U %>% + dplyr::select(-date, -forecast.date, -model.regime) %>% + dplyr::mutate_all(function(X){return(X-mean(X, na.rm = T))}) + } + + # recursively build synthetic data + Y = matrix(nrow = nrow(is), ncol = ncol(is)-1) + Y = data.frame(Y); colnames(Y) = regressors + Y[1:p, ] = is[1:p, -1] + + for(i in (p+1):nrow(is)){ + + X = Y[(i-p):(i-1),] + X = stats::embed(as.matrix(X), dimension = p) + X = data.frame(X) + + if(type %in% c('const', 'both')){X$const = 1} + if(type %in% c('trend', 'both')){X$trend = c(1:nrow(X))} + + X.hat = as.matrix(coef[,-1]) %*% t(as.matrix(X)) + X.hat = t(X.hat) + + Y[i, ] = X.hat - U[i,] + + } + + Y$date = is$date + + # estimate VAR with synthetic data var.bag = VAR( - data = synth, + data = Y, p = p, horizon = 1, freq = freq, - type = type) + type = type + ) - # estimate error-covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(var.bag$residuals[[1]], -date, -forecast.date))) + var.bag$structure = rvar$structure + var.bag$instrumented = rvar$instrumented + if(!is.null(instrument)){var.bag$instrument = instrument.bag} + + # estimate error-covariance matrix or structural impact matrix + B.bag = solve_B(var.bag, report_iv = FALSE) + + # set bagged coef matrix + coef.bag = dplyr::select(var.bag$model$coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')) # estimate IRFs - irf = IRF(Phi = dplyr::select(coef, -y, -dplyr::contains('cosnt'), -dplyr::contains('trend')), - Sig = cov.matrix, - lag = horizon) + irf.bag = + IRF_solve( + Phi = coef.bag, + B = B.bag, + lag = horizon, + structure = rvar$structure + ) # reorganize results - irf = 
data.frame(t(irf$orthirf)) - irf$shock = rep(regressors, horizon + 1) - irf$horizon = sort(rep(c(0:horizon), length(regressors))) - irf = irf %>% dplyr::arrange(shock, horizon) - rownames(irf) = NULL - - return(irf) - - }) - - # 3. calculate confidence intervals - ci.lower = bagged.irf %>% - purrr::reduce(dplyr::bind_rows) %>% - dplyr::group_by(shock, horizon) %>% - dplyr::summarise_all(stats::quantile, p.lower, na.rm = T) %>% - dplyr::arrange(shock, horizon) %>% - tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response.lower') %>% - dplyr::arrange(shock, horizon) + irf.bag = data.frame(t(irf.bag)) - ci.upper = bagged.irf %>% - purrr::reduce(dplyr::bind_rows) %>% - dplyr::group_by(shock, horizon) %>% - dplyr::summarise_all(stats::quantile, p.upper, na.rm = T) %>% - dplyr::arrange(shock, horizon) %>% - tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response.upper') %>% - dplyr::arrange(shock, horizon) + # assign shocks + if(is.na(rvar$structure)){ - ci.med = bagged.irf %>% - purrr::reduce(dplyr::bind_rows) %>% - dplyr::group_by(shock, horizon) %>% - dplyr::summarise_all(stats::quantile, 0.5, na.rm = T) %>% - dplyr::arrange(shock, horizon) %>% - tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response.med') %>% - dplyr::arrange(shock, horizon) + irf.bag$shock = rep(regressors, horizon + 1) + irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) + + }else if(rvar$structure == 'IV-short'){ + + shocks = c(rvar$instrumented, regressors[!regressors %in% rvar$instrumented]) + irf.bag$shock = rep(shocks, horizon + 1) + irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) + + }else if(rvar$structure == 'IV'){ + + irf.bag = irf.bag[(c(1:(horizon))*length(regressors)-length(regressors)+1),] + irf.bag$shock = rvar$instrumented + irf.bag$horizon = c(0:(horizon-1)) + }else if(rvar$structure == 'short'){ + + irf.bag$shock = rep(regressors, horizon + 1) + irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) + + } + + # observation identifiers + rownames(irf.bag) = NULL + colnames(irf.bag) = c(regressors, 'shock', 'horizon') + + return(irf.bag) + + }, + .options = furrr::furrr_options(seed = TRUE)) + + # 2. 
calculate confidence intervals + # collapse to data.frame + ci = bagged.irf %>% + purrr::reduce(dplyr::bind_rows) + + # estimate bands + ci = ci %>% + tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response') %>% + dplyr::group_by(shock, horizon, target) %>% + dplyr::summarize( + response.lower = stats::quantile(response, probs = p.lower, na.rm = T), + response.upper = stats::quantile(response, probs = p.upper, na.rm = T), + response.mean = mean(response, na.rm = T) ) + + # combine point estimates and CI irf = irf %>% tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response') %>% dplyr::arrange(shock, horizon) irf = - purrr::reduce( - list(irf, ci.lower, ci.upper, ci.med), - dplyr::full_join, - by = c('shock', 'target', 'horizon')) + dplyr::full_join( + irf, ci, + by = c('shock', 'target', 'horizon') + ) + # adjust for bias in CI + # (bias can be introduced in the bootstrapping if residuals are not actually mean zero) irf = irf %>% dplyr::mutate( - response.adjust = response - response.med, + response.adjust = response - response.mean, response.lower = response.lower + response.adjust, response.upper = response.upper + response.adjust ) %>% @@ -855,4 +772,3 @@ rvar_irf = function( } - diff --git a/README.md b/README.md index ac896e5..d9d9dfa 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,8 @@ Local Projections (LP) 1. direct projection forecasting 1. impulse responses -Vector Auto-Regression (VAR) +Vector Auto-Regression (VAR), Structural VAR (SVAR), +and external instrument SVAR (Proxy-SVAR) 1. recursive forecasting 2. impulse responses 3. forecast error variance decomposition @@ -87,7 +88,9 @@ Vector Auto-Regression (VAR) # single-regime var #------------------------------------------ # estimate VAR - # (using IC lag selection) + # (using IC lag selection and short-term + # impact restrictions, i.e. calculate structurals + errors via Cholesky decomposition) var = VAR( data = Data, @@ -110,7 +113,7 @@ Vector Auto-Regression (VAR) # estimate IRF irf = - var_irf( + IRF( var, bootstraps.num = 10, CI = c(0.05,0.95)) @@ -120,13 +123,19 @@ Vector Auto-Regression (VAR) # estimate forecast error variance decomposition fevd = - var_fevd( + FEVD( var, horizon = 10) # plot FEVD plot_fevd(fevd) + # estimate historical decomposition + hd = HD(var) + + # plot HD + plot_hd(hd) + #------------------------------------------- # multi-regime var #------------------------------------------- @@ -141,7 +150,7 @@ Vector Auto-Regression (VAR) # estimate IRF rvar.irf = - rvar_irf( + IRF( rvar, horizon = 10, bootstraps.num = 10, @@ -155,7 +164,7 @@ Vector Auto-Regression (VAR) # estimate forecast error variance decomposition rvar.fevd = - rvar_fevd( + FEVD( rvar, horizon = 10) @@ -165,6 +174,9 @@ Vector Auto-Regression (VAR) # regime 2: high interest rates plot_fevd(rvar.fevd[[2]]) + # estimate HD + rvar.hd = HD(rvar) + #------------------------------------------- # single-regime local projections #------------------------------------------- @@ -178,7 +190,7 @@ Vector Auto-Regression (VAR) freq = 'month') # estimate single-regime IRF - lp.irf = lp_irf(lp) + lp.irf = IRF(lp) # plot IRF plot_irf(lp.irf) @@ -196,7 +208,7 @@ Vector Auto-Regression (VAR) freq = 'month') # estimate multi-regime IRF - rlp.irf = rlp_irf(rlp) + rlp.irf = IRF(rlp) # plot IRF # regime 1: low interest rates @@ -207,4 +219,4 @@ Vector Auto-Regression (VAR) --- ## Contact -If you should have questions, concerns, or wish to collaborate, please contact [Tyler J. 
Pike](https://tylerjpike.github.io/) +If you should have questions, concerns, or wish to collaborate, please contact [Tyler J. Pike](https://tylerjpike.github.io/) \ No newline at end of file diff --git a/_pkgdown.yml b/_pkgdown.yml index d114765..78cd453 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -27,14 +27,9 @@ reference: - title: Model and Forecast Analysis desc: Functions to anlayze model and forecasting dynamics contents: - - '`var_irf`' - - '`rvar_irf`' - - '`var_fevd`' - - '`rvar_fevd`' - - '`var_hd`' - - '`rvar_hd`' - - '`lp_irf`' - - '`rlp_irf`' + - '`IRF`' + - '`FEVD`' + - '`HD`' - title: Plots desc: Functions to plot model and forecast analysis contents: diff --git a/cran-comments.md b/cran-comments.md new file mode 100644 index 0000000..56cf847 --- /dev/null +++ b/cran-comments.md @@ -0,0 +1,15 @@ +# CRAN Comments + +## Test environments +* local Windows install, R 4.0.5 +* win-builder (devel and release) +* Ubuntu 16.04.6 (on travis-ci), R 4.0.2 +* R-hub Ubuntu Linux 20.04.1 LTS, R-release +* R-hub Fedora Linux, R-devel +* R-hub Windows Server 2008 R2 SP1, R-devel + +## R CMD check results +There were no ERRORs, WARNINGs, or NOTEs. + +## Downstream dependencies +There are currently no downstream dependencies for this package. \ No newline at end of file diff --git a/development_test.R b/development_test.R deleted file mode 100644 index 38cf9da..0000000 --- a/development_test.R +++ /dev/null @@ -1,1047 +0,0 @@ - -TVAR = function( - data, # data.frame, matrix, ts, xts, zoo: Endogenous regressors - threshold, # string: variable to estimate threshold values - horizon = 10, # int: forecast horizons - freq = 'month', # string: frequency of data (day, week, month, quarter, year) - type = 'const', # string: type of deterministic terms to add ('none', 'const', 'trend', 'both') - p = 1, # int: lags - lag.ic = NULL, # string: information criterion to choose the optimal number of lags ('AIC' or 'BIC') - lag.max = NULL, # int: maximum number of lags to test in lag selection - structure = 'short', # string: type of structural identification strategy to use in model analysis (NULL, 'short', or 'IV') - instrument = NULL, # string: name of instrumental variable contained in the data matrix - instrumented = NULL # string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data -){ - - # function warnings - if(!is.numeric(p) | p %% 1 != 0){ - stop('p must be an integer') - } - if(!is.null(lag.ic)){ - if(!lag.ic %in% c('BIC','AIC')){ - stop("lag.ic must be either 'BIC', 'AIC', or NULL") - } - } - if(!is.null(lag.max)){ - if(lag.max %% 1 != 0){ - stop('lag.max must be an integer if IC-based lag selection is used') - } - } - if(!is.matrix(data) & !is.data.frame(data)){ - stop('data must be a matrix or data.frame') - } - if(!is.numeric(p) | p %% 1 != 0){ - stop('p must be an integer') - } - if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ - stop('horizon must be a positive integer') - } - if(!freq %in% c('day','week','month','quarter','year')){ - stop("freq must be one of the following strings: 'day','week','month','quarter','year'") - } - if(!structure %in% c('short', 'IV', 'IV-short') & !is.null(structure)){ - stop("strucutre must be one of 'strucutre', 'IV', 'IV-short', or NULL.") - } - if(!is.null(instrument)){ - if(!instrument %in% colnames(data)){ - stop("instrument must be the name of a variable found in data.") - } - } - if(!is.null(instrumented)){ - if(!instrumented %in% colnames(data)){ - stop("instrumented must be 
the name of a variable found in data.") - } - } - - # cast as data frame if ts, xts, or zoo object - if(stats::is.ts(data) | xts::is.xts(data) | zoo::is.zoo(data)){ - data = data.frame(date = zoo::index(date), data) - } - - # set aside instruments - if(!is.null(instrument)){ - data.instrument = dplyr::select(data, date, dplyr::all_of(instrument)) - data = dplyr::select(data, -dplyr::all_of(instrument)) - }else{ - data.instrument = NULL - } - - # detect variable to be instrumented - if(is.null(instrumented)){ - var_to_instrument = colnames(dplyr::select(data, -date))[1] - }else{ - var_to_instrument = instrumented - } - - - for(t in unique(data[,c(threshold)])){ - - data.threshold = data %>% - mutate(t_regime = if_else(threshold >= t, 1, 0)) - - rvar = - RVAR( - data = data.threshold, - horizon = horizon, - freq = freq, - type = type, - p = p, - lag.ic = lag.ic, - lag.max = lag.max, - regime = t_regime, - ) - - rvar.ll = rvar$ll - - } - - -} -##################################################################### -##################################################################### -##################################################################### - -solve_B = function(var, report_iv = TRUE){ - - # set residuals - if(class(var) == 'VAR'){ - residuals = var$residuals[[1]] - }else{ - residuals = var$residuals - } - - if(is.null(var$structure) == TRUE){ - - # retrieve reduced-form residuals - data.residuals = residuals - - # reduced form variance-covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(data.residuals, -date, -forecast.date))) - - B = cov.matrix - - return(B) - - }else if(var$structure == 'short'){ - - # retrieve reduced-form residuals - data.residuals = residuals - - # reduced form variance-covariance matrix - cov.matrix = stats::var(stats::na.omit(dplyr::select(data.residuals, -date, -forecast.date))) - - # take cholesky decomposition - B = t(chol(cov.matrix)) - - return(B) - - }else if(var$structure == 'IV'){ - - # retrieve reduced-form residuals - data.residuals = residuals - covariates = colnames(dplyr::select(data.residuals, -date, -forecast.date)) - col_to_instrument = var$instrumented - - # retrieve instrument - data.instrument = var$instrument - instrument = colnames(dplyr::select(data.instrument, -date)) - - # combine data - data = - dplyr::inner_join(data.residuals, data.instrument, by = 'date') %>% - na.omit() - - # first stage least squares - model.first_stage = - lm(col_to_instrument ~., - data = data %>% - select(col_to_instrument = dplyr::all_of(col_to_instrument), all_of(instrument)) %>% - na.omit()) - p_hat = model.first_stage$fitted.values - - if(report_iv == TRUE){ - print(summary(model.first_stage)) - } - - # second stage least squares - # to estimate first column of B - # (automatically scales first entry to 1) - instrumented_shocks = covariates %>% - purrr::map_df(.f = function(X){ - - model.second_stage = lm(data[,X] ~ p_hat) - second_stage_beta = model.second_stage$coefficients[2] - return(second_stage_beta) - - }) %>% - as.vector() - - # scale size of the shock - # see Gertler and Karadi (2015) for background - # see Cesa-Bianchi's VAR-Toolbox for MATLAB implementation - X.demean = - select(data, -date, -forecast.date, -dplyr::all_of(instrument)) %>% - mutate_all(function(X){return(X - mean(X, na.rm = T))}) %>% - as.matrix() - - sigma_b = 1/(nrow(data) - ncol(var$model$coef) + 1) * t(X.demean) %*% X.demean - - s21s11 = instrumented_shocks[2:length(covariates),] %>% as.matrix() - S11 = sigma_b[1,1] %>% as.numeric() - S21 = 
sigma_b[2:nrow(sigma_b),1] %>%as.vector() - S22 = sigma_b[2:nrow(sigma_b),2:ncol(sigma_b)] - - Q = (s21s11 * S11) %*% t(s21s11) - (S21 %*% t(s21s11) + as.matrix(s21s11) %*% t(S21)) + S22 - sp = sqrt( S11 - t(S21 - as.matrix(s21s11) %*% S11) %*% as.matrix(solve(Q) %*% (S21 - s21s11 * S11)) ) - - scaled_instrumented_shocks = instrumented_shocks * as.numeric(sp) - - # prepare B matrix - B = matrix(0, ncol = (length(covariates) - 1), nrow = length(covariates)) - B = cbind(scaled_instrumented_shocks, B) - - return(B) - - }else if(var$structure == 'IV-short'){ - - # align instrument and residuals - valid_dates = - dplyr::inner_join( - dplyr::select(na.omit(residuals), date), - dplyr::select(na.omit(var$instrument), date), - by = 'date' - ) - - # set residuals matrix - residuals = residuals %>% - dplyr::inner_join(valid_dates, by = 'date') %>% - dplyr::select(-date, -forecast.date) %>% - as.matrix() - - residuals = -1*residuals - - # set instrument - instrument = var$instrument %>% - data.frame() %>% - dplyr::inner_join(valid_dates, by = 'date') %>% - dplyr::select(-date) - - instrument.mean = instrument %>% - dplyr::mutate_all(mean, na.rm = T) - - # number of observations - n.obs = nrow(residuals) - - # solve for IV implied impact - pi = solve(n.obs^(-1) * t(residuals) %*% residuals) %*% - (n.obs^(-1) * t(residuals) %*% as.matrix(instrument - instrument.mean)) - - phi_sq = - (n.obs^(-1) * t(instrument - instrument.mean) %*% residuals) %*% - solve( n.obs^(-1) * t(residuals) %*% residuals ) %*% - ( n.obs^(-1) * t(residuals) %*% as.matrix(instrument - instrument.mean) ) - - B1 = - n.obs^(-1) * - ( t(residuals) %*% as.matrix(instrument - instrument.mean) ) %*% - (phi_sq)^(-0.5) - - B = matrix(ncol = ncol(residuals), nrow = ncol(residuals)) - B[,1:ncol(instrument)] = B1 - rownames(B) = colnames(B) = colnames(residuals) - - # solve for orthogonalized structural shock - model.first_stage = lm(instrument[,1] ~ residuals) - orthogonal_instrument = instrument - model.first_stage$residuals - orthogonal_instrument = orthogonal_instrument[,] / sd(orthogonal_instrument[,], na.rm = T) - - shock_matrix = matrix(nrow = nrow(residuals), ncol = ncol(residuals)) - shock_matrix[,1] = orthogonal_instrument - - # reduced form variance-covariance matrix - sigma = stats::var(residuals) - - # solve additional entries - # with a lower triangular restriction - order_sequence = c(1:ncol(residuals)) - - for(i in order_sequence){ - - Y = residuals[,i] - X = shock_matrix[,c(1:i)] - model.second_stage = lm(Y~X) - - if(i != tail(order_sequence,1)){ - - B[i,] = - c( model.second_stage$coef[-1], - sd(model.second_stage$residuals), - rep(0, length(order_sequence) - ncol(instrument) - i) ) - - shock_matrix[,i+1] = model.second_stage$residuals / sd(model.second_stage$residuals) - - }else{ - - B[i,] = model.second_stage$coef[-1] - - } - - } - - # make diagonal entries positive - shock_ordering = data.frame(residuals) %>% - select(var$instrumented, dplyr::everything()) %>% - colnames() - B.sign = diag(sign(diag(B[shock_ordering,]))) - B = B %*% B.sign - - return(B) - - }else{ - - stop('structure must be set to either NULL, "short", or "IV".') - - } - -} - - -RVAR_estimate = function( - data, # data.frame, matrix, ts, xts, zoo: Endogenous regressors - regime, # string: name or regime assignment vector in the design matrix (data) - p = 1, # int: lags - horizon = 10, # int: forecast horizons - freq = 'month', # string: frequency of data (day, week, month, quarter, year) - type = 'const' # string: type of deterministic terms to add 
('none', 'const', 'trend', 'both') -){ - - # function variables - term = estimate = std.error = model.regime = NULL - - # declare regressors - regressors = colnames(dplyr::select(data, -date, -regime)) - - # create regressors - Y = data.frame(data) %>% - dplyr::select(regressors, date) %>% - n.lag(lags = p) %>% - dplyr::full_join( - dplyr::select(data, regime = regime, date), - by = 'date') - - # add deterministic components - if('const' %in% type | 'both' %in% type){Y$const = 1} - if('trend' %in% type | 'both' %in% type){Y$trend = c(1:nrow(Y))} - - # detect regime values - regimes = unique(Y$regime) - - ### estimate coefficients ---------------------- - models = Y %>% - # split by regime - dplyr::group_split(regime) %>% - purrr::map(.f = function(Y){ - - regime.val = unique(Y$regime) - - # calculate equation by equation - models = as.list(regressors) %>% - purrr::map(.f = function(target){ - - X = Y %>% - dplyr::select( - dplyr::contains('.l'), target = target, - dplyr::contains('const'), dplyr::contains('trend')) - - # estimate OLS - model = stats::lm(target ~ . - 1, data = X) - - # coefficients - c = broom::tidy(model) %>% dplyr::select(term, coef = estimate) - c$y = target - - # standard errors - se = broom::tidy(model) %>% dplyr::select(term, std.error) - se$y = target - - # log likelihood - ll = stats::logLik(model)[1] - - # return results - return(list(coef = c, se = se, ll = ll)) - - }) - - # extract coefficients - coef = - purrr::map(models, .f = function(X){return(X$coef)}) %>% - purrr::reduce(dplyr::bind_rows) %>% - tidyr::pivot_wider(values_from = coef, names_from = term) - - - # extract coefficients' standard errors - se = - purrr::map(models, .f = function(X){return(X$se)}) %>% - purrr::reduce(dplyr::bind_rows) %>% - tidyr::pivot_wider(values_from = std.error, names_from = term) - - # extract log likelihood - ll = - purrr::map(models, .f = function(X){return(X$ll)}) %>% - purrr::reduce(rbind) %>% - sum() - - # package for return - model = list(coef = coef, se = se, p = p, freq = freq, type = type, ll = ll, horizon = horizon, regime = regime.val) - - }) - - names(models) = paste0('regime_', unique(Y$regime)) - - - fr = as.list(regimes) %>% - purrr::map(.f = function(regime.val){ - - coef = models[[paste0('regime_', regime.val)]]$coef - - ### estimate forecasts ----------------------- - forecasts = list() - for(i in 1:horizon){ - - # update X - if(i == 1){ - - X = Y %>% - dplyr::select( - dplyr::contains('.l'), - dplyr::contains('const'), - dplyr::contains('trend')) - - X.date = data$date - - }else{ - - X = - dplyr::bind_rows( - dplyr::select(data[1:i,], date, regressors), - dplyr::select(forecast_prev[i+1:nrow(forecast_prev),], date = forecast.date, regressors)) %>% - n.lag(lags = p) - - X.date = X$date - - X = X %>% - dplyr::filter(!is.na(date)) %>% - dplyr::select(dplyr::contains('.l')) - - if('const' %in% type | 'both' %in% type){X$const = 1} - if('trend' %in% type | 'both' %in% type){X$trend = c(1:nrow(X)) + (i-1)} - - } - - # estimate i-step ahead forecast - forecast = as.matrix(X) %*% as.matrix(t(coef[,-1])) - colnames(forecast) = regressors - - # set forecast date - if(i == 1){ - forecast.date = stats::na.omit(X.date) - }else{ - forecast.date = - forecast_date( - forecast.date = stats::na.omit(X.date), - horizon = i-1, - freq = freq - ) - } - - # add in dates - forecast = - data.frame( - date = data$date, - forecast.date = forecast.date, - forecast - ) %>% - dplyr::left_join(dplyr::select(Y, date, model.regime = regime), by = 'date') - - # store forecasts - 
forecasts[[paste0('H_',i)]] = forecast - forecast_prev = forecast - - } - - ### calculate residuals ----------------------- - residuals = forecasts %>% - # error by forecast horizon - purrr::map(.f = function(forecast){ - - error = data.frame(forecast) - error[,c(regressors)] = forecast[,c(regressors)] - data.frame(data)[, c(regressors)] - - return(error) - - }) - - ### return output ----------------------- - residuals = residuals %>% - purrr::map(.f = function(r){ - return( dplyr::filter(r, regime.val == model.regime) ) - }) - - forecasts = forecasts %>% - purrr::map(.f = function(f){ - return( dplyr::filter(f, regime.val == model.regime) ) - }) - - return( - list( - forecasts = forecasts, - residuals = residuals - ) - ) - - }) - - ### Organize and return output ------- - forecasts = as.list(c(1:horizon)) %>% - purrr::map(.f = function(h){ - - f = fr %>% - purrr::map(.f = function(r){ - return(r$forecast[[paste0('H_',h)]]) - }) %>% - purrr::reduce(dplyr::bind_rows) %>% - dplyr::arrange(date) - - }) - - - residuals = as.list(c(1:horizon)) %>% - purrr::map(.f = function(h){ - - f = fr %>% - purrr::map(.f = function(r){ - return(r$residuals[[paste0('H_',h)]]) - }) %>% - purrr::reduce(dplyr::bind_rows) %>% - dplyr::arrange(date) - - }) - - names(forecasts) = paste0('H_',c(1 : horizon)) - names(residuals) = paste0('H_',c(1 : horizon)) - - ### return output -------------- - return( - list( - model = models, - data = data, - forecasts = forecasts, - residuals = residuals, - regime = regime - ) - ) -} - -RVAR = function( - data, # data.frame, matrix, ts, xts, zoo: Endogenous regressors - horizon = 10, # int: forecast horizons - freq = 'month', # string: frequency of data (day, week, month, quarter, year) - type = 'const', # string: type of deterministic terms to add ('none', 'const', 'trend', 'both') - p = 1, # int: lags - lag.ic = NULL, # string: information criterion to choose the optimal number of lags ('AIC' or 'BIC') - lag.max = NULL, # int: maximum number of lags to test in lag selection - regime = NULL, # string: name or regime assignment vector in the design matrix (data) - regime.method = 'rf', # string: regime assignment technique ('rf', 'kmeans', 'EM', 'BP') - regime.n = 2, # int: number of regimes to estimate (applies to kmeans and EM) - structure = 'short', # string: type of structural identification strategy to use in model analysis (NULL, 'short', or 'IV') - instrument = NULL, # string: name of instrumental variable contained in the data matrix - instrumented = NULL # string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data -){ - - # function warnings - if(!is.numeric(p) | p %% 1 != 0){ - stop('p must be an integer') - } - if(!is.null(lag.ic)){ - if(!lag.ic %in% c('BIC','AIC')){ - stop("lag.ic must be either 'BIC', 'AIC', or NULL") - } - } - if(!is.null(lag.max)){ - if(lag.max %% 1 != 0){ - stop('lag.max must be an integer if IC-based lag selection is used') - } - } - if(!structure %in% c('short', 'IV', 'IV-short') & !is.null(structure)){ - stop("strucutre must be one of 'strucutre', 'IV', 'IV-short', or NULL.") - } - if(!is.null(instrument)){ - if(!instrument %in% colnames(data)){ - stop("instrument must be the name of a variable found in data.") - } - } - if(!is.null(instrumented)){ - if(!instrumented %in% colnames(data)){ - stop("instrumented must be the name of a variable found in data.") - } - } - if(!is.matrix(data) & !is.data.frame(data)){ - stop('data must be a matrix or data.frame') - } - if(!is.numeric(p) | p 
%% 1 != 0){ - stop('p must be an integer') - } - if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ - stop('horizon must be a positive integer') - } - if(!freq %in% c('day','week','month','quarter','year')){ - stop("freq must be one of the following strings: 'day','week','month','quarter','year'") - } - - # cast as data frame if ts, xts, or zoo object - if(stats::is.ts(data) | xts::is.xts(data) | zoo::is.zoo(data)){ - data = data.frame(date = zoo::index(date), data) - } - - # set aside instruments - if(!is.null(instrument)){ - data.instrument = dplyr::select(data, date, dplyr::all_of(instrument)) - data = dplyr::select(data, -dplyr::all_of(instrument)) - }else{ - data.instrument = NULL - } - - # detect variable to be instrumented - # (defaults to first non-date column if missing) - if(is.null(instrumented)){ - var_to_instrument = colnames(dplyr::select(data, -date, -regime))[1] - }else{ - var_to_instrument = instrumented - } - - # create regimes - if(is.null(regime)){ - - data = - regimes( - data, - regime.n = regime.n, - method = regime.method) - - regime = 'regime' - - } - - # create VAR - if(!is.null(lag.ic) & !is.null(lag.max)){ - - ic.scores = vector(length = lag.max+1) - - models = c(1:lag.max) %>% - purrr::map(.f = function(p){ - - # estimate candidate model - model = - RVAR_estimate( - data = data, - p = p, - regime = regime, - horizon = horizon, - freq = freq, - type = type - ) - - # calculate IC - ic.score = - IC( - ic = lag.ic, - errors = model$residuals[[1]], - data = data, - p = p - ) - - ic.scores[p] = ic.score - - # return candidate model - return(model) - - }) - - # return IC minimizing VAR - min.ic = which.min(ic.scores) - model = models[[min.ic]] - - }else{ - - model = - RVAR_estimate( - data = data, - p = p, - regime = regime, - horizon = horizon, - freq = freq, - type = type - ) - } - - # add structure - model$structure = structure - model$instrument = data.instrument - model$instrumented = var_to_instrument - - class(model) = 'RVAR' - return(model) - - -} - -rvar_irf = function( - rvar, # threshold VAR output - horizon = 10, # int: number of periods - CI = c(0.1, 0.9), # numeric vector: c(lower ci bound, upper ci bound) - bootstrap.type = 'auto', # string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used - bootstrap.num = 100, # int: number of bootstraps - bootstrap.parallel = FALSE, # boolean: create IRF draws in parallel - bootstrap.cores = -1 # int: number of cores to use in parallel processing; -1 detects and uses half the available cores -){ - - # function warnings - if(!is.numeric(bootstrap.num) | bootstrap.num %% 1 != 0){ - stop('bootstrap.num must be an integer') - } - if(!is.numeric(CI) | length(CI) != 2 | min(CI) < 0 | max(CI) > 1 | is.na(sum(CI))){ - stop('CI must be a two element numeric vector bound [0,1]') - } - if(!is.numeric(horizon) | horizon %% 1 != 0 | horizon <= 0){ - stop('horizon must be a positive integer') - } - - # function variables - y = forecast.date = shock = target = response = response.lower = response.upper = model.regime = response.med = response.adjust = NULL - - # set data - data = rvar$data - p = rvar$model[[1]]$p - freq = rvar$model[[1]]$freq - regime = rvar$regime - regressors = colnames(dplyr::select(data, -date, -regime)) - structre = rvar$structure - - if('const' %in% colnames(rvar$model[[1]]$coef) & - 'trend' %in% colnames(rvar$model[[1]]$coef)){ - type = 'both' - }else if('const' %in% colnames(rvar$model[[1]]$coef)){ - type = 'const' - 
}else if('trend' %in% colnames(rvar$model[[1]]$coef)){ - type = 'trend' - } - - p.lower = CI[1] - p.upper = CI[2] - - regimes = dplyr::select(data, regime = regime) - regimes = unique(regimes$regime) - - # estimate impulse responses by regime - results = as.list(regimes) %>% - purrr::map(.f = function(regime.val){ - - # set regime specific data - coef = rvar$model[[paste0('regime_',regime.val)]]$coef - - residuals = rvar$residuals[[1]] %>% - dplyr::filter(model.regime == regime.val) %>% - dplyr::select(-model.regime) - - is = data %>% - dplyr::inner_join(dplyr::select(residuals, date), by = 'date') %>% - dplyr::select(-regime) - - if(!is.null(rvar$instrument)){ - instrument = dplyr::inner_join(rvar$instrument, dplyr::select(is, date), by = 'date') - } - - ### calculate impulse responses -------------- - - # estimate error-covariance matrix or structural impact matrix - B = - solve_B( - var = list( - residuals = residuals, - structure = rvar$structure, - instrument = instrument, - instrumented = rvar$instrumented - ) - ) - - # estimate IRFs - phi = dplyr::select(coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')) - - irf = IRF(Phi = phi, - B = B, - lag = horizon, - structure = rvar$structure) - - # reorganize results - irf = data.frame(t(irf)) - - if(!is.null(rvar$structure)){ - if(rvar$structure == 'IV-short'){ - shocks = c(rvar$instrumented, regressors[!regressors %in% rvar$instrumented]) - irf$shock = rep(shocks, horizon + 1) - } - }else{ - irf$shock = rep(regressors, horizon + 1) - } - - irf$horizon = sort(rep(c(0:horizon), length(regressors))) - irf = dplyr::arrange(irf, shock, horizon) - rownames(irf) = NULL - colnames(irf) = c(regressors, 'shock', 'horizon') - - ### bootstrap irf standard errors -------------- - - # 0. set up parallel back-end - if(bootstrap.parallel == TRUE){ - if(bootstrap.cores == -1){ - n.cores = floor(future::availableCores() / 2) - }else{ - n.cores = bootstrap.parallel - } - future::plan(future::multisession, workers = n.cores) - }else{ - future::plan(future::sequential) - } - - # 1. create bootstrap time series - bagged.irf = as.list(1:bootstrap.num) %>% - furrr::future_map(.f = function(count){ - - # bootstrap residuals - if(bootstrap.type == 'wild' | - bootstrap.type == 'auto' & structure == 'IV' | - bootstrap.type == 'auto' & structure == 'IV-short'){ - - # 'wild' bootstrap technique for simple distribution - # using observed scaled residuals with random sign flip. - # See the Rademacher distribution. 
- U = residuals[,!colnames(residuals) %in% c('date','forecast.date','model.regime')] - r = sample(c(-1,1), size = nrow(U), replace = T) - U = sweep(U, MARGIN = 1, r, `*`) - - # bootstrap instrument - if(!is.null(instrument)){ - instrument.bag = instrument - instrument.bag[,-1] = sweep(instrument.bag[,-1], MARGIN = 1, r, `*`) - } - - }else if(bootstrap.type == 'standard' | - bootstrap.type == 'auto' & structure != 'IV' & structure != 'IV-short'){ - - # standard bootstrap technique a al Lutkepohl (2005) - # draw residuals with replacement - U = na.omit(residuals) - U = U[sample(c(1:nrow(U)), - size = nrow(residuals), - replace = TRUE),] - U = U %>% - dplyr::select(-date, -forecast.date, -model.regime) %>% - dplyr::mutate_all(function(X){return(X-mean(X, na.rm = T))}) - } - - # recursively build synthetic data - Y = matrix(nrow = nrow(is), ncol = ncol(is)-1) - Y = data.frame(Y); colnames(Y) = regressors - Y[1:p, ] = is[1:p, -1] - - for(i in (p+1):nrow(is)){ - - X = Y[(i-p):(i-1),] - X = embed(as.matrix(X), dimension = p) - X = data.frame(X) - - if(type %in% c('const', 'both')){X$const = 1} - if(type %in% c('trend', 'both')){X$trend = c(1:nrow(X))} - - X.hat = as.matrix(coef[,-1]) %*% t(as.matrix(X)) - X.hat = t(X.hat) - - Y[i, ] = X.hat - U[i,] - - } - - Y$date = is$date - - # estimate VAR with synthetic data - var.bag = - VAR( - data = Y, - p = p, - horizon = 1, - freq = freq, - type = type - ) - - var.bag$structure = rvar$structure - var.bag$instrumented = rvar$instrumented - if(!is.null(instrument)){var.bag$instrument = instrument.bag} - - # estimate error-covariance matrix or structural impact matrix - B.bag = solve_B(var.bag, report_iv = FALSE) - - # set bagged coef matrix - coef.bag = dplyr::select(var.bag$model$coef, -y, -dplyr::contains('const'), -dplyr::contains('trend')) - - # estimate IRFs - irf.bag = IRF(Phi = coef.bag, - B = B.bag, - lag = horizon, - structure = rvar$structure) - - # reorganize results - irf.bag = data.frame(t(irf.bag)) - - if(!is.null(rvar$structure)){ - if(rvar$structure == 'IV-short'){ - shocks = c(rvar$instrumented, regressors[!regressors %in% rvar$instrumented]) - irf.bag$shock = rep(shocks, horizon + 1) - } - }else{ - irf.bag$shock = rep(regressors, horizon + 1) - } - - irf.bag$horizon = sort(rep(c(0:horizon), length(regressors))) - irf.bag = dplyr::arrange(irf.bag, shock, horizon) - rownames(irf.bag) = NULL - colnames(irf.bag) = c(regressors, 'shock', 'horizon') - - return(irf.bag) - - }) - - # 2. 
calculate confidence intervals - # collapse to data.frame - ci = bagged.irf %>% - purrr::reduce(dplyr::bind_rows) - - # remove unused shocks in the case of IV - if(!is.null(rvar$structure)){ - if(rvar$structure == 'IV'){ - ci = dplyr::filter(ci, shock == rvar$instrumented) - } - } - - # estimate bands - ci = ci %>% - tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response') %>% - group_by(shock, horizon, target) %>% - summarize(response.lower = quantile(response, probs = p.lower, na.rm = T), - response.upper = quantile(response, probs = p.upper, na.rm = T), - response.mean = mean(response, na.rm = T) ) - - # combine point estimates and CI - irf = irf %>% - tidyr::pivot_longer(cols = regressors, names_to = 'target', values_to = 'response') %>% - dplyr::arrange(shock, horizon) - - irf = - full_join( - irf, ci, - by = c('shock', 'target', 'horizon') - ) - - # adjust for bias in CI - # (bias can be introduced in the bootstrapping if residuals are not actually mean zero) - irf = irf %>% - dplyr::mutate( - response.adjust = response - response.mean, - response.lower = response.lower + response.adjust, - response.upper = response.upper + response.adjust - ) %>% - dplyr::select(target, shock, horizon, response.lower, response, response.upper) %>% - dplyr::arrange(target, shock, horizon) - - ### return output -------------- - return(irf) - - }) - - names(results) = paste0('regime_', regimes) - - ### return output -------------- - return(results) - -} - -plot_individual_irf = function( - irf, # irf object - shock.var, # string: name of variable to treat as the shock - response.var, # string: name of variable to treat as the response - title, # string: title of the chart - ylab # string: y-axis label -){ - - # function variables - response = error = horizon = shock = target = response.lower = response.upper = NULL - - # filter for one shock and one target - plotdata = irf %>% - dplyr::filter(shock == shock.var & target == response.var) %>% - dplyr::select(horizon, response, response.lower, response.upper) - - # plot - irf.plot <- plotdata %>% - ggplot2::ggplot(ggplot2::aes(x=horizon, y=response, ymin=response.lower, ymax=response.upper)) + - ggplot2::geom_hline(yintercept = 0, color="black") + - ggplot2::geom_ribbon(fill="royalblue", alpha=0.2) + - ggplot2::geom_line(color = 'darkred', lwd = 1) + - ggplot2::theme_light() + - ggplot2::ggtitle(title)+ - ggplot2::ylab(ylab)+ - ggplot2::xlab("") + - ggplot2::theme(plot.title = ggplot2::element_text(size = 11, hjust=0.5), - axis.title.y = ggplot2::element_text(size=11)) - - irf.plot -} - -##################################################################### -# TEST - -library(tidyverse) -library(lubridate) - -# Data -data.macro = read_csv('/scratch/m1tjp01/Andrea/FinancialStability/Data/Intermediate/model_monthly_macro_covariates.csv') -data.macro = data.macro %>% filter(year(date) >= 1991) -data.macro = data.macro %>% select(date, PCE, UNEMP, PR, EBP, mp_shock) - -# MODEL -# var = -# VAR( -# data = data.macro, -# horizon = 1, -# freq = 'month', -# p = 1, -# structure = 'IV-short', -# instrument = 'mp_shock', -# instrumented = 'PR' -# ) -# -# irf = var_irf(var, horizon = 20, bootstrap.type = 'wild', bootstrap.num = 10, bootstrap.parallel = TRUE) -# plot_irf(irf, shocks = 'PR') - -rvar = - RVAR( - data = data.macro, - horizon = 1, - freq = 'month', - p = 1, - structure = 'IV-short', - instrument = 'mp_shock', - instrumented = 'PR' - ) - -irf = rvar_irf(rvar, horizon = 20, bootstrap.type = 'wild', bootstrap.num = 10, 
bootstrap.parallel = TRUE) -plot_irf(irf[[1]], shocks = 'PR') - diff --git a/docs/LICENSE.html b/docs/LICENSE.html index 7d15b60..5c16da1 100644 --- a/docs/LICENSE.html +++ b/docs/LICENSE.html @@ -87,7 +87,7 @@ sovereign - 1.1.0 + 1.2.0 diff --git a/docs/authors.html b/docs/authors.html index 3c04416..86c09b5 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -87,7 +87,7 @@ sovereign - 1.1.0 + 1.2.0 diff --git a/docs/index.html b/docs/index.html index 5fc025c..b62cdf2 100644 --- a/docs/index.html +++ b/docs/index.html @@ -17,7 +17,7 @@ + historical decompositions, and forecast error variance decompositions. "> + + + + + + + +Estimate forecast error variance decomposition — FEVD • sovereign + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + +
+ +
+
+ + +
+

Estimate the forecast error variance decomposition for VARs with +either short or 'IV-short' structural errors. See VAR +and RVAR documentation for details regarding structural errors.

+
+ +
FEVD(model, horizon = 10, scale = TRUE)
+ +

Arguments

+ + + + + + + + + + + + + + +
model

VAR or RVAR class object

horizon

int: number of periods

scale

boolean: scale variable contribution as percent of total error

+ +

Value

+ +

long-form data.frame

+

See also

+ + + +

Examples

+
# \donttest{ + + # simple time series + AA = c(1:100) + rnorm(100) + BB = c(1:100) + rnorm(100) + CC = AA + BB + rnorm(100) + date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) + Data = data.frame(date = date, AA, BB, CC) + + # estimate VAR + var = + sovereign::VAR( + data = Data, + horizon = 10, + freq = 'month', + lag.ic = 'BIC', + lag.max = 4) +
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
+# impulse response functions +var.irf = sovereign::IRF(var) +
#> +#> Attaching package: ‘purrr’
#> The following object is masked from ‘package:testthat’: +#> +#> is_null
+# forecast error variance decomposition +var.fevd = sovereign::FEVD(var) + +# historical shock decomposition +var.hd = sovereign::HD(var) + +# } + +
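For reference, a small variation on the example above using the documented arguments; this is only a sketch and reuses the var object estimated in the example code on this page.

# unscaled forecast error variance contributions over a longer horizon
var.fevd.raw = sovereign::FEVD(var, horizon = 20, scale = FALSE)
head(var.fevd.raw)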
+
+ +
+ + +
+ + +
+

Site built with pkgdown 1.6.1.

+
+ +
+
+ + + + + + + + + + + diff --git a/docs/reference/HD.html b/docs/reference/HD.html new file mode 100644 index 0000000..ce9df2b --- /dev/null +++ b/docs/reference/HD.html @@ -0,0 +1,256 @@ + + + + + + + + +Estimate historical decomposition — HD • sovereign + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + +
+ +
+
+ + +
+

Estimate the historical decomposition for VARs with +either 'short' or 'IV-short' structural errors. See VAR +and RVAR documentation for details regarding structural errors.

+
+ +
HD(model)
+ +

Arguments

+ + + + + + +
model

VAR or RVAR class object

+ +

Value

+ +

long-form data.frame

+

See also

+ + + +

Examples

+
# \donttest{ + + # simple time series + AA = c(1:100) + rnorm(100) + BB = c(1:100) + rnorm(100) + CC = AA + BB + rnorm(100) + date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) + Data = data.frame(date = date, AA, BB, CC) + + # estimate VAR + var = + sovereign::VAR( + data = Data, + horizon = 10, + freq = 'month', + lag.ic = 'BIC', + lag.max = 4) +
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
+# impulse response functions +var.irf = sovereign::IRF(var) +
#> +#> Attaching package: ‘purrr’
#> The following object is masked from ‘package:testthat’: +#> +#> is_null
+# forecast error variance decomposition +var.fevd = sovereign::FEVD(var) + +# historical shock decomposition +var.hd = sovereign::HD(var) + +# } + +
+
+ +
+ + +
+ + +
+

Site built with pkgdown 1.6.1.

+
+ +
+
+ + + + + + + + + + + diff --git a/docs/reference/LP.html b/docs/reference/LP.html index 539a088..17c0725 100644 --- a/docs/reference/LP.html +++ b/docs/reference/LP.html @@ -88,7 +88,7 @@ sovereign - 1.0.1 + 1.2.0 @@ -211,6 +211,13 @@

Arg

Value

list object with elements data, model, forecasts, residuals; if there is more than one forecast horizon estimated, then model, forecasts, residuals will each be a list where each element corresponds to a single horizon

+

References

+ + +
    +
  1. Jorda, Oscar "Estimation and Inference of Impulse Responses by Local Projections" 2005.

  2. +
+

See also

LP()

diff --git a/docs/reference/RLP.html b/docs/reference/RLP.html index 76f2029..3fedb1e 100644 --- a/docs/reference/RLP.html +++ b/docs/reference/RLP.html @@ -46,7 +46,10 @@ - + @@ -88,7 +91,7 @@ sovereign - 1.0.1 + 1.2.0
@@ -147,7 +150,10 @@

Estimate regime-dependent local projections

-

Estimate regime-dependent local projections

+

Estimate a regime-dependent local projection (i.e. a state-dependent LP), with an exogenous state indicator, of the specification: +$$Y_{t+h} = X_t \beta_{s_t} + \epsilon_t$$ +where t is the time index, and s is a mutually exclusive state of the world observed at time t. When the regime vector is +not supplied by the user, then a two-state regime series is estimated via random forest.

RLP(
@@ -225,7 +231,16 @@ 

Arg

Value

-

list object with elements data, model, forecasts, residuals; if there is more than one forecast horizon estimated, then model, forecasts, residuals will each be a list where each element corresponds to a single horizon

+

list of lists, one list per regime, where each regime is a list with elements data, model, forecasts, residuals; +if there is more than one forecast horizon estimated, then model, forecasts, residuals +will each be a list where each element corresponds to a single horizon
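To make the nesting concrete, a brief orientation sketch; it assumes rlp is an RLP object estimated over several horizons (as in the Examples section) and uses only positional indexing, since element names are not guaranteed here.

# sketch: inspect the nested regime-by-horizon structure of an RLP object
length(rlp)                     # one element per regime
str(rlp[[1]], max.level = 1)    # data, model, forecasts, residuals for that regime
length(rlp[[1]]$forecasts)      # one element per estimated forecast horizon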

+

References

+ + +
    +
  1. Jorda, Oscar "Estimation and Inference of Impulse Responses by Local Projections" 2005.

  2. +
+

See also

LP()

diff --git a/docs/reference/RVAR.html b/docs/reference/RVAR.html index d59fa5b..01d25ec 100644 --- a/docs/reference/RVAR.html +++ b/docs/reference/RVAR.html @@ -6,7 +6,7 @@ -Estimate regime-dependent VAR — RVAR • sovereign +Estimate regime-dependent VAR, SVAR, or Proxy-SVAR — RVAR • sovereign @@ -45,11 +45,11 @@ - + @@ -92,7 +92,7 @@ sovereign - 1.1.0 + 1.2.0
@@ -145,16 +145,16 @@

Estimate a regime-dependent VAR (i.e. a state-dependent VAR), with an exogenous state indicator, of the specification: -$$Y_t = X \beta_s + \epsilon_t$$ -where t is the time index, Y is the set of outcome vectors, X the design matrix (of p lagged values of Y), and -s is a mutually exclusive state of the world observed at time t-1. When the regime vector is not supplied by the user, then a two-state +$$Y_{t+1} = X_t \beta_{s_t} + \epsilon_t$$ +where t is the time index, Y is the set of outcome vectors, X the design matrix (of p lagged values of Y), and +s is a mutually exclusive state of the world observed at time t. When the regime vector is not supplied by the user, then a two-state regime series is estimated via random forest.

@@ -168,7 +168,10 @@

Estimate regime-dependent VAR

lag.max = NULL, regime = NULL, regime.method = "rf", - regime.n = 2 + regime.n = 2, + structure = "short", + instrument = NULL, + instrumented = NULL )

Arguments

@@ -214,26 +217,69 @@

Arg regime.n

int: number of regimes to estimate (applies to kmeans and EM)

+ + structure +

string: type of structural identification strategy to use in model analysis (NA, 'short', 'IV', or 'IV-short')

+ + + instrument +

string: name of instrumental variable contained in the data matrix

+ + + instrumented +

string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data

+

Value

-

list of lists, each regime returns its own list with elements data, model, forecasts, residuals

+

List of lists, where each regime is a list with items:

    +
  1. data: data.frame with endogenous variables and 'date' column.

  2. +
  3. model: list with data.frame of model coefficients (in pseudo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of deterministic term type, numeric of log-likelihood, regime indicator

  4. +
  5. forecasts: list of data.frames per horizon; data.frame with column for date (day the forecast was made), forecast.date (the date being forecasted), target (variable forecasted), and forecast

  6. +
  7. residuals: list of data.frames per horizon; data.frame of residuals

  8. +
  9. structure: string denoting which structural identification strategy will be used in analysis (or NA)

  10. +
  11. instrument: data.frame with 'date' column and 'instrument' column (or NULL)

  12. +
  13. instrumented: string denoting which column will be instrumted in 'IV' and 'IV-short' strategies (or NULL)

  14. +
+

Details

The regime-dependent VAR is a generalization of the popular threshold VAR - which trades off estimating a threshold value for an endogenous variable for accepting an exogenous regime that can be based on information from inside or outside of the system, with or without parametric -assumptions, and with or without timing restrictions.

+assumptions, and with or without timing restrictions. Moreover, the RVAR may be extended to include structural shocks, including the use of instrumental +variables.

+

State dependence. The RVAR augments the traditional VAR by allowing state-dependence in the coefficient matrix. The RVAR differs from the more common threshold VAR (TVAR) +because states are exogenously determined. As a result, the states (i.e. regimes) do not need to be based on information inside the model; moreover, regimes can be +determined by any process the user decides best fits their needs. For example, regimes based on NBER-dated recessions and expansions reflect a judgmental process +that considers hundreds of series, potentially none of which appear in the VAR being modeled. Alternatively, a user may use unsupervised machine learning to assign regimes - this is +the process the sovereign::regimes function facilitates.

+

Structural shocks. See Sims (1980) for details regarding the baseline vector-autoregression (VAR) model. The VAR may be augmented to become a structural VAR (SVAR) with one of three different structural identification strategies:

    +
  1. short-term impact restrictions via Cholesky decomposition, see Christiano et al (1999) for details (structure = 'short')

  2. +
  3. external instrument identification, i.e. a Proxy-SVAR strategy, see Mertens and Ravn (2013) for details (structure = 'IV')

  4. +
  5. or a combination of short-term and IV identification via Lunsford (2015) (structure = 'IV-short')

  6. +
+ +

Note that including structure does not change the estimation of model coefficients or forecasts, but does change impulse response functions, forecast error variance decomposition, +and historical decompositions. Historical decompositions will not be available for models using the 'IV' structure. Additionally note that only one instrument may be used in this +estimation routine.
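To make the structural arguments concrete, a minimal sketch follows; the instrument series Z is simulated here purely for illustration (it is not part of the Examples data), and a real application would supply a credible external instrument.

# sketch: regime-dependent Proxy-SVAR identification
# (Data as constructed in the Examples section below; Z is an illustrative instrument)
Data.iv = data.frame(Data, Z = rnorm(100))

rvar.iv =
  sovereign::RVAR(
    data = Data.iv,
    horizon = 10,
    freq = 'month',
    p = 1,
    structure = 'IV-short',
    instrument = 'Z',
    instrumented = 'AA')

# downstream analysis functions now use the chosen identification scheme
rvar.iv.irf = sovereign::IRF(rvar.iv, bootstrap.type = 'wild')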

+

References

+ + +
    +
  1. Christiano, Lawrence, Martin Eichenbaum, and Charles Evans "Monetary policy shocks: What have we learned and to what end?" Handbook of Macroeconomics, Vol 1, Part A, 1999.

  2. +
  3. Lunsford, Kurt "Identifying Structural VARs with a Proxy Variable and a Test for a Weak Proxy" 2015.

  4. +
  5. Mertens, Karel and Morten Ravn "The Dynamic Effects of Personal and Corporate Income Tax Changes in the United States" 2013.

  6. +
  7. Sims, Christopher "Macroeconomics and Reality" 1980.

  8. +
+

See also

VAR()

-

var_irf()

-

var_fevd()

-

var_hd()

RVAR()

-

rvar_irf()

-

rvar_fevd()

-

rvar_hd()

+

IRF()

+

FEVD()

+

HD()

Examples

# \donttest{ @@ -256,16 +302,16 @@

Examp regime.n = 2, lag.ic = 'BIC', lag.max = 4) -

#> Error in n.lag(., lags = p): could not find function "n.lag"
-# impulse response functions -rvar.irf = sovereign::rvar_irf(rvar) -
#> Error in sovereign::rvar_irf(rvar): object 'rvar' not found
-# forecast error variance decomposition -rvar.fevd = sovereign::rvar_fevd(rvar) -
#> Error in sovereign::rvar_fevd(rvar): object 'rvar' not found
-# historical shock decomposition -rvar.hd = sovereign::rvar_hd(rvar) -
#> Error in sovereign::rvar_hd(rvar): object 'rvar' not found
+
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
+ # impulse response functions + rvar.irf = sovereign::rvar_irf(rvar) + + # forecast error variance decomposition + rvar.fevd = sovereign::rvar_fevd(rvar) + + # historical shock decomposition + rvar.hd = sovereign::rvar_hd(rvar) + # }
diff --git a/docs/reference/VAR.html b/docs/reference/VAR.html index 1a0b816..50b8ab0 100644 --- a/docs/reference/VAR.html +++ b/docs/reference/VAR.html @@ -88,7 +88,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -196,7 +196,7 @@

Arg structure -

string: type of structural identification strategy to use in model analysis (NULL, 'short', 'IV', or 'IV-short')

+

string: type of structural identification strategy to use in model analysis (NA, 'short', 'IV', or 'IV-short')

instrument @@ -213,12 +213,12 @@

Value

  1. data: data.frame with endogenous variables and 'date' column.

  2. -
  3. model: list with data.frame of model coefficients (in psuedo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of type

  4. +
  5. model: list with data.frame of model coefficients (in pseudo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of deterministic term type, numeric of log-likelihood

  6. forecasts: list of data.frames per horizon; data.frame with column for date (day the forecast was made), forecast.date (the date being forecasted), target (variable forecasted), and forecast

  7. residuals: list of data.frames per horizon; data.frame of residuals

  8. -
  9. structure: string denoting which structural identification strategy will be used in analysis (or NULL)

  10. +
  11. structure: string denoting which structural identification strategy will be used in analysis (or NA)

  12. instrument: data.frame with 'date' column and 'instrument' column (or NULL)

  13. -
  14. instrumented: string denoting which column will be instrumted in 'IV' and 'IV-short' strategies (or NULL)

  15. +
  16. instrumented: string denoting which column will be instrumented in 'IV' and 'IV-short' strategies (or NA)

Details

@@ -266,16 +266,16 @@

Examp freq = 'month', lag.ic = 'BIC', lag.max = 4) -
#> Error in n.lag(., lags = p): could not find function "n.lag"
+
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
# impulse response functions var.irf = sovereign::var_irf(var) -
#> Error in var$model: object of type 'closure' is not subsettable
+ # forecast error variance decomposition var.fevd = sovereign::var_fevd(var) -
#> Error in var$model: object of type 'closure' is not subsettable
+ # historical shock decomposition var.hd = sovereign::var_hd(var) -
#> Error in var$model: object of type 'closure' is not subsettable
# } +# }
diff --git a/docs/reference/index.html b/docs/reference/index.html index 476b74f..7382046 100644 --- a/docs/reference/index.html +++ b/docs/reference/index.html @@ -87,7 +87,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -193,7 +193,7 @@

RVAR()

-

Estimate regime-dependent VAR

+

Estimate regime-dependent VAR, SVAR, or Proxy-SVAR

@@ -228,51 +228,21 @@

var_irf()

+

IRF()

Estimate impulse response functions

-

rvar_irf()

- -

Estimate regime-dependent impulse response functions

- - - -

var_fevd()

+

FEVD()

Estimate forecast error variance decomposition

-

rvar_fevd()

- -

Estimate regime-dependent forecast error variance decomposition

- - - -

var_hd()

+

HD()

Estimate historical decomposition

- - - -

rvar_hd()

- -

Estimate regime-dependent historical decomposition

- - - -

lp_irf()

- -

Estimate impulse response functions

- - - -

rlp_irf()

- -

Estimate regime-dependent impulse response functions

diff --git a/docs/reference/irf.html b/docs/reference/irf.html new file mode 100644 index 0000000..713184d --- /dev/null +++ b/docs/reference/irf.html @@ -0,0 +1,291 @@ + + + + + + + + +Estimate impulse response functions — IRF • sovereign + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + +
+ +
+
+ + +
+

See VAR, RVAR, LP, and RLP documentation for details +regarding models and structural errors.

+
+ +
IRF(
+  model,
+  horizon = 10,
+  CI = c(0.1, 0.9),
+  bootstrap.type = "auto",
+  bootstrap.num = 100,
+  bootstrap.parallel = FALSE,
+  bootstrap.cores = -1
+)
+ +

Arguments

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
model

VAR, RVAR, LP, or RLP class object

horizon

int: number of periods

CI

numeric vector: c(lower ci bound, upper ci bound)

bootstrap.type

string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used

bootstrap.num

int: number of bootstraps

bootstrap.parallel

boolean: create IRF draws in parallel

bootstrap.cores

int: number of cores to use in parallel processing; -1 detects and uses half the available cores

+ +

Value

+ +

data frame with columns target, shock, horizon, response.lower, response, response.upper; regime-based models return a list with a data frame per regime.
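Because the return shape differs between single-model and regime-dependent inputs, a short orientation sketch may help; it assumes rvar is an estimated RVAR object, and the regime labels shown are illustrative since they follow the values of the regime series.

# sketch: IRF output for a regime-dependent model
rvar.irf = sovereign::IRF(rvar)   # returns a list with one data.frame per regime
names(rvar.irf)                   # e.g. "regime_1" "regime_2"
head(rvar.irf[[1]])               # target, shock, horizon, response.lower, response, response.upper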

+

See also

+ + + +

Examples

+
# \donttest{ + + # simple time series + AA = c(1:100) + rnorm(100) + BB = c(1:100) + rnorm(100) + CC = AA + BB + rnorm(100) + date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) + Data = data.frame(date = date, AA, BB, CC) + + # estimate VAR + var = + sovereign::VAR( + data = Data, + horizon = 10, + freq = 'month', + lag.ic = 'BIC', + lag.max = 4 + ) +
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
+ # impulse response function + var.irf = sovereign::IRF(var) + + # local projection forecasts + lp = + sovereign::LP( + data = Data, + horizon = c(1:10), + lag.ic = 'AIC', + lag.max = 4, + type = 'both', + freq = 'month') +
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
+ # LP impulse response function + lp.irf = sovereign::IRF(lp) + +# } + +
+
+ +
+ + +
+ + +
+

Site built with pkgdown 1.6.1.

+
+ +
+
+ + + + + + + + + + + diff --git a/docs/reference/lp_irf.html b/docs/reference/lp_irf.html index 68a784d..e60f512 100644 --- a/docs/reference/lp_irf.html +++ b/docs/reference/lp_irf.html @@ -88,7 +88,7 @@ sovereign - 1.0.1 + 1.2.0 diff --git a/docs/reference/rvar_fevd.html b/docs/reference/rvar_fevd.html index bd8a295..9b3a822 100644 --- a/docs/reference/rvar_fevd.html +++ b/docs/reference/rvar_fevd.html @@ -46,7 +46,8 @@ - + @@ -88,7 +89,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -147,7 +148,8 @@

Estimate regime-dependent forecast error variance decomposition

-

Estimate regime-dependent forecast error variance decomposition

+

Estimate forecast error variance decomposition for RVARs +with either short or 'IV-short' structural errors.

rvar_fevd(rvar, horizon = 10, scale = TRUE)
@@ -204,16 +206,16 @@

Examp regime.n = 2, lag.ic = 'BIC', lag.max = 4) -
#> Error in n.lag(., lags = p): could not find function "n.lag"
+
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
# impulse response functions rvar.irf = sovereign::rvar_irf(rvar) -
#> Error in sovereign::rvar_irf(rvar): object 'rvar' not found
+ # forecast error variance decomposition rvar.fevd = sovereign::rvar_fevd(rvar) -
#> Error in sovereign::rvar_fevd(rvar): object 'rvar' not found
+ # historical shock decomposition rvar.hd = sovereign::rvar_hd(rvar) -
#> Error in sovereign::rvar_hd(rvar): object 'rvar' not found
+ # }
diff --git a/docs/reference/rvar_hd.html b/docs/reference/rvar_hd.html index 7095685..f6d1b5c 100644 --- a/docs/reference/rvar_hd.html +++ b/docs/reference/rvar_hd.html @@ -46,10 +46,8 @@ - + @@ -91,7 +89,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -150,10 +148,8 @@

Estimate regime-dependent historical decomposition

-

Estimate historical decomposition with contemporaneous impact -restrictions via Cholesky decomposition - taking the variable ordering -present in the RVAR object. Estimate one response function per unique -state defined by the regime-dependent VAR.

+

Estimate historical decomposition for RVARs with +either short or 'IV-short' structural errors.

rvar_hd(rvar)
@@ -202,16 +198,16 @@

Examp regime.n = 2, lag.ic = 'BIC', lag.max = 4) -
#> Error in n.lag(., lags = p): could not find function "n.lag"
+
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
# impulse response functions rvar.irf = sovereign::rvar_irf(rvar) -
#> Error in sovereign::rvar_irf(rvar): object 'rvar' not found
+ # forecast error variance decomposition rvar.fevd = sovereign::rvar_fevd(rvar) -
#> Error in sovereign::rvar_fevd(rvar): object 'rvar' not found
+ # historical shock decomposition rvar.hd = sovereign::rvar_hd(rvar) -
#> Error in sovereign::rvar_hd(rvar): object 'rvar' not found
+ # }
diff --git a/docs/reference/rvar_irf.html b/docs/reference/rvar_irf.html index 73442ac..fc12e16 100644 --- a/docs/reference/rvar_irf.html +++ b/docs/reference/rvar_irf.html @@ -46,9 +46,7 @@ - + @@ -90,7 +88,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -149,12 +147,18 @@

Estimate regime-dependent impulse response functions

-

Estimate impulse responses with contemporaneous impact restrictions via Cholesky decomposition - -taking the variable ordering present in the VAR object. Estimate one response function per unique -state defined by the regime-dependent VAR.

+

Estimate regime-dependent impulse response functions

-
rvar_irf(rvar, horizon = 10, bootstraps.num = 100, CI = c(0.1, 0.9))
+
rvar_irf(
+  rvar,
+  horizon = 10,
+  CI = c(0.1, 0.9),
+  bootstrap.type = "auto",
+  bootstrap.num = 100,
+  bootstrap.parallel = FALSE,
+  bootstrap.cores = -1
+)

Arguments

@@ -168,18 +172,30 @@

Arg

- + + + + + + + + + - - + + + + + +

int: number of periods

bootstraps.numCI

numeric vector: c(lower ci bound, upper ci bound)

bootstrap.type

string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used

bootstrap.num

int: number of bootstraps

CI

numeric vector: c(lower ci bound, upper ci bound)

bootstrap.parallel

boolean: create IRF draws in parallel

bootstrap.cores

int: number of cores to use in parallel processing; -1 detects and uses half the available cores

Value

-

list of lists, each regime returns its own list with elements irfs, ci.lower, and ci.upper; all elements are long-form data.frames

+

list of regimes, each with data.frame of columns target, shock, horizon, response.lower, response, response.upper
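A brief usage sketch with the bootstrap arguments documented above; it assumes rvar is an estimated RVAR object.

# sketch: wild bootstrap confidence intervals with parallel draws
rvar.irf =
  sovereign::rvar_irf(
    rvar,
    horizon = 20,
    CI = c(0.05, 0.95),
    bootstrap.type = 'wild',
    bootstrap.num = 500,
    bootstrap.parallel = TRUE,
    bootstrap.cores = -1)   # -1 uses half of the detected cores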

See also

VAR()

@@ -210,16 +226,16 @@

Examp regime.n = 2, lag.ic = 'BIC', lag.max = 4) -

#> Error in n.lag(., lags = p): could not find function "n.lag"
-# impulse response functions -rvar.irf = sovereign::rvar_irf(rvar) -
#> Error in sovereign::rvar_irf(rvar): object 'rvar' not found
-# forecast error variance decomposition -rvar.fevd = sovereign::rvar_fevd(rvar) -
#> Error in sovereign::rvar_fevd(rvar): object 'rvar' not found
-# historical shock decomposition -rvar.hd = sovereign::rvar_hd(rvar) -
#> Error in sovereign::rvar_hd(rvar): object 'rvar' not found
+
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
+ # impulse response functions + rvar.irf = sovereign::rvar_irf(rvar) + + # forecast error variance decomposition + rvar.fevd = sovereign::rvar_fevd(rvar) + + # historical shock decomposition + rvar.hd = sovereign::rvar_hd(rvar) + # }
diff --git a/docs/reference/var_fevd.html b/docs/reference/var_fevd.html index cad8f58..2e5e3b6 100644 --- a/docs/reference/var_fevd.html +++ b/docs/reference/var_fevd.html @@ -46,7 +46,8 @@ - + @@ -88,7 +89,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -147,7 +148,8 @@

Estimate forecast error variance decomposition

-

Estimate forecast error variance decomposition

+

Estimate forecast error variance decomposition for VARs +with either short or 'IV-short' structural errors.

var_fevd(var, horizon = 10, scale = TRUE)
@@ -201,16 +203,16 @@

Examp freq = 'month', lag.ic = 'BIC', lag.max = 4) -
#> Error in n.lag(., lags = p): could not find function "n.lag"
+
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
# impulse response functions var.irf = sovereign::var_irf(var) -
#> Error: object of type 'closure' is not subsettable
+ # forecast error variance decomposition var.fevd = sovereign::var_fevd(var) -
#> Error: object of type 'closure' is not subsettable
+ # historical shock decomposition var.hd = sovereign::var_hd(var) -
#> Error: object of type 'closure' is not subsettable
+ # }
diff --git a/docs/reference/var_hd.html b/docs/reference/var_hd.html index e2dd842..469bc47 100644 --- a/docs/reference/var_hd.html +++ b/docs/reference/var_hd.html @@ -46,9 +46,8 @@ - + @@ -90,7 +89,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -149,9 +148,8 @@

Estimate historical decomposition

-

Estimate historical decomposition with contemporaneous impact -restrictions via Cholesky decomposition - taking the variable ordering -present in the VAR object.

+

Estimate historical decomposition for VARs with +either short or 'IV-short' structural errors.

var_hd(var)
@@ -197,16 +195,16 @@

Examp freq = 'month', lag.ic = 'BIC', lag.max = 4) -
#> Error in n.lag(., lags = p): could not find function "n.lag"
+
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
# impulse response functions var.irf = sovereign::var_irf(var) -
#> Error: object of type 'closure' is not subsettable
+ # forecast error variance decomposition var.fevd = sovereign::var_fevd(var) -
#> Error: object of type 'closure' is not subsettable
+ # historical shock decomposition var.hd = sovereign::var_hd(var) -
#> Error: object of type 'closure' is not subsettable
+ # }
diff --git a/docs/reference/var_irf.html b/docs/reference/var_irf.html index c836f2d..bb01fad 100644 --- a/docs/reference/var_irf.html +++ b/docs/reference/var_irf.html @@ -46,8 +46,7 @@ - + @@ -89,7 +88,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -148,8 +147,7 @@

Estimate impulse response functions

-

Estimate impulse responses with contemporaneous impact restrictions via Cholesky decomposition - -taking the variable ordering present in the VAR object.

+

Estimate impulse response functions

var_irf(
@@ -178,14 +176,26 @@ 

Arg

numeric vector: c(lower ci bound, upper ci bound)

- bootstraps.num + bootstrap.type +

string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used

+ + + bootstrap.num

int: number of bootstraps

+ + bootstrap.parallel +

boolean: create IRF draws in parallel

+ + + bootstrap.cores +

int: number of cores to use in parallel processing; -1 detects and uses half the available cores

+

Value

-

list object with elements irfs, ci.lower, and ci.upper; all elements are long-form data.frames

+

data.frame with columns target, shock, horizon, response.lower, response, response.upper

See also

VAR()

@@ -215,16 +225,16 @@

Examp freq = 'month', lag.ic = 'BIC', lag.max = 4) -

#> Error in n.lag(., lags = p): could not find function "n.lag"
-# impulse response functions -var.irf = sovereign::var_irf(var) -
#> Error: object of type 'closure' is not subsettable
-# forecast error variance decomposition -var.fevd = sovereign::var_fevd(var) -
#> Error: object of type 'closure' is not subsettable
-# historical shock decomposition -var.hd = sovereign::var_hd(var) -
#> Error: object of type 'closure' is not subsettable
+
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
#> Warning: NAs introduced by coercion
+ # impulse response functions + var.irf = sovereign::var_irf(var) + + # forecast error variance decomposition + var.fevd = sovereign::var_fevd(var) + + # historical shock decomposition + var.hd = sovereign::var_hd(var) + # }
diff --git a/docs/to-do.html b/docs/to-do.html index 5627cf6..c1a3383 100644 --- a/docs/to-do.html +++ b/docs/to-do.html @@ -87,7 +87,7 @@ sovereign - 1.1.0 + 1.2.0 @@ -145,100 +145,50 @@

To-do

-
+

-High Priority (Version 1.0.1)

+High Priority (Version 1.2.0)
    -
  1. code -
      -
    1. -historical decomposition +
    2. Code
        -
      1. code
      2. -
      3. clean code
      4. -
      5. test
      6. +
      7. threshold VAR
      8. -documentation
        +more structural options
      9. -
      -
    3. -
    4. -LP volatility correction +
    5. Forecast performance
        -
      1. code
      2. -
      3. clean and add warnings
      4. -
      5. test
      6. -
      7. -documentation
        -
      8. -
      -
    6. -
    7. Add classes (e.g. class(var))
    8. -
    9. -Make n.lag not require a date
      +
    10. add BIC and AIC
    11. +
    12. add RMSE, MSE, MAE
  2. -
  3. documentation -
      -
    1. refine discussion of shocks
    2. -
    3. -define equations
      -
  4. -
  5. website +
  6. Documentation
      -
    1. -update home page
      -
    2. -
    3. -update references
      -
    4. -
    5. -update vignette
      +
    6. check that new VAR values section is correct
    7. +
    8. document proxy VAR code and methodology
    9. +
    10. link to helpful external resources (e.g. Bianchi’s slides)
    11. +
    12. add academic references to methods
    13. -
    14. update news
  7. -
-
-
-

-Medium Priority (Version 1.0.2)

+
  • Website
      -
    1. Code -
        -
      1. endeogenously estimated threshold VAR
      2. -
      3. more structural options -
          -
        1. no structure
        2. -
        3. short term (cholesky)
        4. -
        5. long term
        6. -
        7. instrumental variable (proxy var)
        8. -
        +
      4. update home page
      5. -
      6. Forecast performance -
          -
        1. add BIC and AIC
        2. -
        3. add RMSE, MSE, MAE
          +
        4. update references
        5. -
        -
      7. -
      +
    2. update vignette
    3. -
    4. Documentation -
        -
      1. regimes function methods
      2. -
      3. link to helpful external resources (e.g. Bianchi’s slides)
      4. -
      5. add academic references to methods
      6. +
      7. update news
  • -
    +

    -Low Priority (> Version 1.0.2)

    +Low Priority (> Version 1.2.0)
    1. Code
        @@ -246,6 +196,7 @@

      1. State-dependent ML
      2. Bayesian Threshold VAR
      3. +
      4. Data cleaning (e.g. Hamilton filter)
    2. Documentation @@ -255,6 +206,17 @@

    +
    +

    +FOMC data

    +

    write a set of functions, perhaps its own package, with nicely cleaned FOMC data (set the functions to pull from Google Drive, GitHub, or some other public location)

    +
      +
    • FOMC meeting and announcement dates
    • +
    • policy rates
    • +
    • QE actions
    • +
    • and derived MP shocks
    • +
    +
    diff --git a/man/FEVD.Rd b/man/FEVD.Rd new file mode 100644 index 0000000..a7e1d07 --- /dev/null +++ b/man/FEVD.Rd @@ -0,0 +1,63 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/analysis_wrappers.R +\name{FEVD} +\alias{FEVD} +\title{Estimate forecast error variance decomposition} +\usage{ +FEVD(model, horizon = 10, scale = TRUE) +} +\arguments{ +\item{model}{VAR or RVAR class object} + +\item{horizon}{int: number of periods} + +\item{scale}{boolean: scale variable contribution as percent of total error} +} +\value{ +long-form data.frame +} +\description{ +Estimate the forecast error variance decomposition for VARs with +either short or 'IV-short' structural errors. See VAR +and RVAR documentation for details regarding structural errors. +} +\examples{ +\donttest{ + + # simple time series + AA = c(1:100) + rnorm(100) + BB = c(1:100) + rnorm(100) + CC = AA + BB + rnorm(100) + date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) + Data = data.frame(date = date, AA, BB, CC) + + # estimate VAR + var = + sovereign::VAR( + data = Data, + horizon = 10, + freq = 'month', + lag.ic = 'BIC', + lag.max = 4) + +# impulse response functions +var.irf = sovereign::IRF(var) + +# forecast error variance decomposition +var.fevd = sovereign::FEVD(var) + +# historical shock decomposition +var.hd = sovereign::HD(var) + +} + +} +\seealso{ +\code{\link[=VAR]{VAR()}} + +\code{\link[=var_fevd]{var_fevd()}} + +\code{\link[=RVAR]{RVAR()}} + +\code{\link[=rvar_fevd]{rvar_fevd()}} +} diff --git a/man/HD.Rd b/man/HD.Rd new file mode 100644 index 0000000..d4d6e76 --- /dev/null +++ b/man/HD.Rd @@ -0,0 +1,59 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/analysis_wrappers.R +\name{HD} +\alias{HD} +\title{Estimate historical decomposition} +\usage{ +HD(model) +} +\arguments{ +\item{model}{VAR or RVAR class object} +} +\value{ +long-from data.frame +} +\description{ +Estimate the historical decomposition for VARs with +either 'short' or 'IV-short' structural errors. See VAR +and RVAR documentation for details regarding structural errors. 
+} +\examples{ +\donttest{ + + # simple time series + AA = c(1:100) + rnorm(100) + BB = c(1:100) + rnorm(100) + CC = AA + BB + rnorm(100) + date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) + Data = data.frame(date = date, AA, BB, CC) + + # estimate VAR + var = + sovereign::VAR( + data = Data, + horizon = 10, + freq = 'month', + lag.ic = 'BIC', + lag.max = 4) + +# impulse response functions +var.irf = sovereign::IRF(var) + +# forecast error variance decomposition +var.fevd = sovereign::FEVD(var) + +# historical shock decomposition +var.hd = sovereign::HD(var) + +} + +} +\seealso{ +\code{\link[=VAR]{VAR()}} + +\code{\link[=var_hd]{var_hd()}} + +\code{\link[=RVAR]{RVAR()}} + +\code{\link[=rvar_hd]{rvar_hd()}} +} diff --git a/man/IRF.Rd b/man/IRF.Rd new file mode 100644 index 0000000..3a5e8f4 --- /dev/null +++ b/man/IRF.Rd @@ -0,0 +1,86 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/analysis_wrappers.R +\name{IRF} +\alias{IRF} +\title{Estimate impulse response functions} +\usage{ +IRF( + model, + horizon = 10, + CI = c(0.1, 0.9), + bootstrap.type = "auto", + bootstrap.num = 100, + bootstrap.parallel = FALSE, + bootstrap.cores = -1 +) +} +\arguments{ +\item{model}{VAR, RVAR, LP, or RLP class object} + +\item{horizon}{int: number of periods} + +\item{CI}{numeric vector: c(lower ci bound, upper ci bound)} + +\item{bootstrap.type}{string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used} + +\item{bootstrap.num}{int: number of bootstraps} + +\item{bootstrap.parallel}{boolean: create IRF draws in parallel} + +\item{bootstrap.cores}{int: number of cores to use in parallel processing; -1 detects and uses half the available cores} +} +\value{ +data frame with columns \code{target}, \code{shock}, \code{horizon}, \code{response.lower}, \code{response}, \code{response.upper}; regime-based models return a list with a data frame per regime. +} +\description{ +See VAR, RVAR, LP, and RLP documentation for details +regarding models and structural errors. +} +\examples{ +\donttest{ + + # simple time series + AA = c(1:100) + rnorm(100) + BB = c(1:100) + rnorm(100) + CC = AA + BB + rnorm(100) + date = seq.Date(from = as.Date('2000-01-01'), by = 'month', length.out = 100) + Data = data.frame(date = date, AA, BB, CC) + + # estimate VAR + var = + sovereign::VAR( + data = Data, + horizon = 10, + freq = 'month', + lag.ic = 'BIC', + lag.max = 4 + ) + + # impulse response function + var.irf = sovereign::IRF(var) + + # local projection forecasts + lp = + sovereign::LP( + data = Data, + horizon = c(1:10), + lag.ic = 'AIC', + lag.max = 4, + type = 'both', + freq = 'month') + + # LP impulse response function + lp.irf = sovereign::IRF(lp) + +} + +} +\seealso{ +\code{\link[=var_irf]{var_irf()}} + +\code{\link[=rvar_irf]{rvar_irf()}} + +\code{\link[=lp_irf]{lp_irf()}} + +\code{\link[=rlp_irf]{rlp_irf()}} +} diff --git a/man/LP.Rd b/man/LP.Rd index 1282125..4245675 100644 --- a/man/LP.Rd +++ b/man/LP.Rd @@ -69,6 +69,11 @@ Estimate local projections } +} +\references{ +\enumerate{ +\item Jorda, Oscar "\href{https://www.aeaweb.org/articles?id=10.1257/0002828053828518}{Estimation and Inference of Impulse Responses by Local Projections}" 2005. 
+} } \seealso{ \code{\link[=LP]{LP()}} diff --git a/man/RLP.Rd b/man/RLP.Rd index 2fbeac3..e5be863 100644 --- a/man/RLP.Rd +++ b/man/RLP.Rd @@ -48,10 +48,15 @@ RLP( \item{regime.n}{int: number of regimes to estimate (applies to kmeans and EM)} } \value{ -list object with elements \code{data}, \code{model}, \code{forecasts}, \code{residuals}; if there is more than one forecast horizon estimated, then \code{model}, \code{forecasts}, \code{residuals} will each be a list where each element corresponds to a single horizon +list of lists, one list per regime, each regime with objects with elements \code{data}, \code{model}, \code{forecasts}, \code{residuals}; +if there is more than one forecast horizon estimated, then \code{model}, \code{forecasts}, \code{residuals} +will each be a list where each element corresponds to a single horizon } \description{ -Estimate regime-dependent local projections +Estimate a regime-dependent local projection (i.e. a state-dependent LP), with an exogenous state indicator, of the specification: +\deqn{Y_{t+h} = X_t \beta_{s_t} + \epsilon_t} +where \emph{t} is the time index, and \emph{s} is a mutually exclusive state of the world observed at time \emph{t}. When the regime vector is +not supplied by the user, then a two-state regime series is estimated via random forest. } \examples{ \donttest{ @@ -83,6 +88,11 @@ Estimate regime-dependent local projections } +} +\references{ +\enumerate{ +\item Jorda, Oscar "\href{https://www.aeaweb.org/articles?id=10.1257/0002828053828518}{Estimation and Inference of Impulse Responses by Local Projections}" 2005. +} } \seealso{ \code{\link[=LP]{LP()}} diff --git a/man/RVAR.Rd b/man/RVAR.Rd index 8f30977..f93475c 100644 --- a/man/RVAR.Rd +++ b/man/RVAR.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/var.R \name{RVAR} \alias{RVAR} -\title{Estimate regime-dependent VAR} +\title{Estimate regime-dependent VAR, SVAR, or Proxy-SVAR} \usage{ RVAR( data, @@ -14,7 +14,10 @@ RVAR( lag.max = NULL, regime = NULL, regime.method = "rf", - regime.n = 2 + regime.n = 2, + structure = "short", + instrument = NULL, + instrumented = NULL ) } \arguments{ @@ -37,21 +40,54 @@ RVAR( \item{regime.method}{string: regime assignment technique ('rf', 'kmeans', 'EM', or 'BP')} \item{regime.n}{int: number of regimes to estimate (applies to kmeans and EM)} + +\item{structure}{string: type of structural identification strategy to use in model analysis (NA, 'short', 'IV', or 'IV-short')} + +\item{instrument}{string: name of instrumental variable contained in the data matrix} + +\item{instrumented}{string: name of variable to be instrumented in IV and IV-short procedure; default is the first non-date variable in data} } \value{ -list of lists, each regime returns its own list with elements \code{data}, \code{model}, \code{forecasts}, \code{residuals} +List of lists, where each regime is a list with items: +\enumerate{ +\item data: data.frame with endogenous variables and 'date' column. 
+\item model: list with data.frame of model coefficients (in psuedo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of deterministic term type, numeric of log-likelihood, regime indicator +\item forecasts: list of data.frames per horizon; data.frame with column for date (day the forecast was made), forecast.date (the date being forecasted), target (variable forecasted), and forecast +\item residuals: list of data.frames per horizon; data.frame of residuals +\item structure: string denoting which structural identification strategy will be used in analysis (or NA) +\item instrument: data.frame with 'date' column and 'instrument' column (or NULL) +\item instrumented: string denoting which column will be instrumted in 'IV' and 'IV-short' strategies (or NULL) +} } \description{ Estimate a regime-dependent VAR (i.e. a state-dependent VAR), with an exogenous state indicator, of the specification: -\deqn{Y_t = X \beta_s + \epsilon_t} -where t is the time index, Y is the set of outcome vectors, X the design matrix (of p lagged values of Y), and -s is a mutually exclusive state of the world observed at time t-1. When the regime vector is not supplied by the user, then a two-state +\deqn{Y_{t+1} = X_t \beta_{s_t} + \epsilon_t} +where \emph{t} is the time index, \emph{Y} is the set of outcome vectors, \emph{X} the design matrix (of \emph{p} lagged values of Y), and +\emph{s} is a mutually exclusive state of the world observed at time \emph{t}. When the regime vector is not supplied by the user, then a two-state regime series is estimated via random forest. } \details{ The regime-dependent VAR is a generalization of the popular threshold VAR - which trades off estimating a threshold value for an endogenous variable for accepting an exogenous regime that can be based on information from inside or outside of the system, with or without parametric -assumptions, and with or without timing restrictions. +assumptions, and with or without timing restrictions. Moreover, the RVAR may be extended to include structural shocks, including the use of instrumental +variables. + +\strong{State dependence.} The RVAR augments the traditional VAR by allowing state-dependence in the coefficient matrix. The RVAR differs from the more common threshold VAR (TVAR), due +to the fact that states are exegonesouly determined. As a result, the states (i.e. regimes) do not need to be based on information inside the model, moreover, regimes can be +determined by any process the user determines best fits their needs. For example, regimes based on NBER dated recessions and expansions are based on a judgmental process +considering hundreds of series, potentially none of which are in the VAR being modeled. Alternatively, a user may use unsupervised machine learning to assign regimes - this is +the process the \code{sovereign::regimes} function facilitates. + +\strong{Structural shocks.} See Sims (1980) for details regarding the baseline vector-autoregression (VAR) model. The VAR may be augmented to become a structural VAR (SVAR) with one of three different structural identification strategies: +\enumerate{ +\item short-term impact restrictions via Cholesky decomposition, see Christiano et al (1999) for details \strong{(structure = 'short')} +\item external instrument identification, i.e. 
a Proxy-SVAR strategy, see Mertens and Ravn (2013) for details \strong{(structure = 'IV')} +\item or a combination of short-term and IV identification via Lunsford (2015) \strong{(structure = 'IV-short')} +} + +Note that including structure does not change the estimation of model coefficients or forecasts, but does change impulse response functions, forecast error variance decomposition, +and historical decompositions. Historical decompositions will not be available for models using the 'IV' structure. Additionally note that only one instrument may be used in this +estimation routine. } \examples{ \donttest{ @@ -75,32 +111,34 @@ assumptions, and with or without timing restrictions. lag.ic = 'BIC', lag.max = 4) -# impulse response functions -rvar.irf = sovereign::rvar_irf(rvar) + # impulse response functions + rvar.irf = sovereign::rvar_irf(rvar) -# forecast error variance decomposition -rvar.fevd = sovereign::rvar_fevd(rvar) + # forecast error variance decomposition + rvar.fevd = sovereign::rvar_fevd(rvar) -# historical shock decomposition -rvar.hd = sovereign::rvar_hd(rvar) + # historical shock decomposition + rvar.hd = sovereign::rvar_hd(rvar) } +} +\references{ +\enumerate{ +\item Christiano, Lawrence, Martin Eichenbaum, and Charles Evans "\href{https://www.sciencedirect.com/science/article/pii/S1574004899010058}{Monetary policy shocks: What have we learned and to what end?}" Handbook of Macroeconomics, Vol 1, Part A, 1999. +\item Lunsford, Kurt "\href{https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2699452#}{Identifying Structural VARs with a Proxy Variable and a Test for a Weak Proxy}" 2015. +\item Mertens, Karel and Morten Ravn "\href{https://www.aeaweb.org/articles?id=10.1257/aer.103.4.1212}{The Dynamic Effects of Personal and Corporate Income Tax Changes in the United States}" 2013. +\item Sims, Christopher "\href{https://www.jstor.org/stable/1912017}{Macroeconomics and Reality}" 1980. +} } \seealso{ \code{\link[=VAR]{VAR()}} -\code{\link[=var_irf]{var_irf()}} - -\code{\link[=var_fevd]{var_fevd()}} - -\code{\link[=var_hd]{var_hd()}} - \code{\link[=RVAR]{RVAR()}} -\code{\link[=rvar_irf]{rvar_irf()}} +\code{\link[=IRF]{IRF()}} -\code{\link[=rvar_fevd]{rvar_fevd()}} +\code{\link[=FEVD]{FEVD()}} -\code{\link[=rvar_hd]{rvar_hd()}} +\code{\link[=HD]{HD()}} } diff --git a/man/VAR.Rd b/man/VAR.Rd index b79f225..d4fd756 100644 --- a/man/VAR.Rd +++ b/man/VAR.Rd @@ -32,7 +32,7 @@ VAR( \item{lag.max}{int: maximum number of lags to test in lag selection} -\item{structure}{string: type of structural identification strategy to use in model analysis (NULL, 'short', 'IV', or 'IV-short')} +\item{structure}{string: type of structural identification strategy to use in model analysis (NA, 'short', 'IV', or 'IV-short')} \item{instrument}{string: name of instrumental variable contained in the data matrix} @@ -41,12 +41,12 @@ VAR( \value{ \enumerate{ \item data: data.frame with endogenous variables and 'date' column. 
-\item model: list with data.frame of model coefficients (in psuedo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of type
+\item model: list with data.frame of model coefficients (in pseudo-companion form), data.frame of coefficient standard errors, integer of lags p, integer of horizons, string of frequency, string of deterministic term type, numeric of log-likelihood
 \item forecasts: list of data.frames per horizon; data.frame with column for date (day the forecast was made), forecast.date (the date being forecasted), target (variable forecasted), and forecast
 \item residuals: list of data.frames per horizon; data.frame of residuals
-\item structure: string denoting which structural identification strategy will be used in analysis (or NULL)
+\item structure: string denoting which structural identification strategy will be used in analysis (or NA)
 \item instrument: data.frame with 'date' column and 'instrument' column (or NULL)
-\item instrumented: string denoting which column will be instrumted in 'IV' and 'IV-short' strategies (or NULL)
+\item instrumented: string denoting which column will be instrumented in 'IV' and 'IV-short' strategies (or NA)
 }
 }
 \description{
diff --git a/man/rvar_fevd.Rd b/man/rvar_fevd.Rd
index 8f2143d..23113a7 100644
--- a/man/rvar_fevd.Rd
+++ b/man/rvar_fevd.Rd
@@ -17,7 +17,8 @@ rvar_fevd(rvar, horizon = 10, scale = TRUE)
 list, each regime returns its own long-form data.frame
 }
 \description{
-Estimate regime-dependent forecast error variance decomposition
+Estimate forecast error variance decomposition for RVARs
+with either short or 'IV-short' structural errors.
 }
 \examples{
 \donttest{
diff --git a/man/rvar_hd.Rd b/man/rvar_hd.Rd
index 1f12fec..1e06fa3 100644
--- a/man/rvar_hd.Rd
+++ b/man/rvar_hd.Rd
@@ -13,10 +13,8 @@ rvar_hd(rvar)
 long form data.frames
 }
 \description{
-Estimate historical decomposition with contemporaneous impact
-restrictions via Cholesky decomposition - taking the variable ordering
-present in the RVAR object. Estimate one response function per unique
-state defined by the regime-dependent VAR.
+Estimate historical decomposition for RVARs with
+either short or 'IV-short' structural errors.
} \examples{ \donttest{ diff --git a/man/rvar_irf.Rd b/man/rvar_irf.Rd index fae85c2..b0fcc60 100644 --- a/man/rvar_irf.Rd +++ b/man/rvar_irf.Rd @@ -4,24 +4,36 @@ \alias{rvar_irf} \title{Estimate regime-dependent impulse response functions} \usage{ -rvar_irf(rvar, horizon = 10, bootstraps.num = 100, CI = c(0.1, 0.9)) +rvar_irf( + rvar, + horizon = 10, + CI = c(0.1, 0.9), + bootstrap.type = "auto", + bootstrap.num = 100, + bootstrap.parallel = FALSE, + bootstrap.cores = -1 +) } \arguments{ \item{rvar}{RVAR output} \item{horizon}{int: number of periods} -\item{bootstraps.num}{int: number of bootstraps} - \item{CI}{numeric vector: c(lower ci bound, upper ci bound)} + +\item{bootstrap.type}{string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used} + +\item{bootstrap.num}{int: number of bootstraps} + +\item{bootstrap.parallel}{boolean: create IRF draws in parallel} + +\item{bootstrap.cores}{int: number of cores to use in parallel processing; -1 detects and uses half the available cores} } \value{ -list of lists, each regime returns its own list with elements \code{irfs}, \code{ci.lower}, and \code{ci.upper}; all elements are long-form data.frames +list of regimes, each with data.frame of columns \code{target}, \code{shock}, \code{horizon}, \code{response.lower}, \code{response}, \code{response.upper} } \description{ -Estimate impulse responses with contemporaneous impact restrictions via Cholesky decomposition - -taking the variable ordering present in the VAR object. Estimate one response function per unique -state defined by the regime-dependent VAR. +Estimate regime-dependent impulse response functions } \examples{ \donttest{ @@ -45,14 +57,14 @@ state defined by the regime-dependent VAR. lag.ic = 'BIC', lag.max = 4) -# impulse response functions -rvar.irf = sovereign::rvar_irf(rvar) + # impulse response functions + rvar.irf = sovereign::rvar_irf(rvar) -# forecast error variance decomposition -rvar.fevd = sovereign::rvar_fevd(rvar) + # forecast error variance decomposition + rvar.fevd = sovereign::rvar_fevd(rvar) -# historical shock decomposition -rvar.hd = sovereign::rvar_hd(rvar) + # historical shock decomposition + rvar.hd = sovereign::rvar_hd(rvar) } diff --git a/man/var_fevd.Rd b/man/var_fevd.Rd index 4d1b31d..be29b07 100644 --- a/man/var_fevd.Rd +++ b/man/var_fevd.Rd @@ -17,7 +17,8 @@ var_fevd(var, horizon = 10, scale = TRUE) long-form data.frame } \description{ -Estimate forecast error variance decomposition +Estimate forecast error variance decomposition for VARs +with either short or 'IV-short' structural errors. } \examples{ \donttest{ diff --git a/man/var_hd.Rd b/man/var_hd.Rd index 89e448b..53ffde7 100644 --- a/man/var_hd.Rd +++ b/man/var_hd.Rd @@ -13,9 +13,8 @@ var_hd(var) long-from data.frame } \description{ -Estimate historical decomposition with contemporaneous impact -restrictions via Cholesky decomposition - taking the variable ordering -present in the VAR object. +Estimate historical decomposition for VARs with +either short or 'IV-short' structural errors. 
 }
 \examples{
 \donttest{
diff --git a/man/var_irf.Rd b/man/var_irf.Rd
index a820c06..c27fa20 100644
--- a/man/var_irf.Rd
+++ b/man/var_irf.Rd
@@ -21,14 +21,19 @@ var_irf(
 
 \item{CI}{numeric vector: c(lower ci bound, upper ci bound)}
 
-\item{bootstraps.num}{int: number of bootstraps}
+\item{bootstrap.type}{string: bootstrapping technique to use ('auto', 'standard', or 'wild'); if auto then wild is used for IV or IV-short, else standard is used}
+
+\item{bootstrap.num}{int: number of bootstraps}
+
+\item{bootstrap.parallel}{boolean: create IRF draws in parallel}
+
+\item{bootstrap.cores}{int: number of cores to use in parallel processing; -1 detects and uses half the available cores}
 }
 \value{
-list object with elements \code{irfs}, \code{ci.lower}, and \code{ci.upper}; all elements are long-form data.frames
+data.frame with columns \code{target}, \code{shock}, \code{horizon}, \code{response.lower}, \code{response}, \code{response.upper}
 }
 \description{
-Estimate impulse responses with contemporaneous impact restrictions via Cholesky decomposition -
-taking the variable ordering present in the VAR object.
+Estimate impulse response functions
 }
 \examples{
 \donttest{
@@ -49,14 +54,14 @@ taking the variable ordering present in the VAR object.
     lag.ic = 'BIC',
     lag.max = 4)
 
-# impulse response functions
-var.irf = sovereign::var_irf(var)
+  # impulse response functions
+  var.irf = sovereign::var_irf(var)
 
-# forecast error variance decomposition
-var.fevd = sovereign::var_fevd(var)
+  # forecast error variance decomposition
+  var.fevd = sovereign::var_fevd(var)
 
-# historical shock decomposition
-var.hd = sovereign::var_hd(var)
+  # historical shock decomposition
+  var.hd = sovereign::var_hd(var)
 
 }
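The var_irf() arguments above replace the old bootstraps.num interface, and the same bootstrap controls are exposed through the new IRF() wrapper. The following is a minimal illustrative sketch of how the revised interface fits together; it is not part of the patch, and `Data` is a placeholder data.frame assumed to hold a 'date' column plus the endogenous series.

# illustrative sketch (not part of the patch); 'Data' is a placeholder data.frame
library(sovereign)

# reduced-form VAR with short-run (Cholesky) identification declared at estimation
var =
  sovereign::VAR(
    data = Data,
    horizon = 10,
    freq = 'month',
    type = 'both',
    p = 2,
    structure = 'short')

# impulse responses with the new bootstrap controls:
#   bootstrap.type = 'auto' selects the wild bootstrap for 'IV'/'IV-short' models
#   and the standard bootstrap otherwise; bootstrap.cores = -1 uses half the cores
var.irf =
  sovereign::IRF(
    var,
    horizon = 10,
    CI = c(0.05, 0.95),
    bootstrap.type = 'auto',
    bootstrap.num = 100,
    bootstrap.parallel = TRUE,
    bootstrap.cores = -1)

# the result is a single long data.frame with columns
# target, shock, horizon, response.lower, response, response.upper
head(var.irf)

Because 'auto' degrades to the standard bootstrap when no instrument is declared, the same call works unchanged for a plain Cholesky SVAR.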
diff --git a/tests/testthat/Rplots.pdf b/tests/testthat/Rplots.pdf
index 8267677f162bed26039cc787b1e233751433d5f3..160ffd82f86ea5d78f788ecddaf659b121d6ecb5 100644
GIT binary patch
delta 16461 (base85-encoded binary data omitted)
delta 26184 (base85-encoded binary data omitted)
[unreadable content omitted]
zx60y);*6kKZ|S6MZ~0qNFoiiFz8Q`>JIjLZG(J+6{CcXF4I1V=R6WRx2Le(?;8omb zD^AJ!wTFW5Ld(todKks#PBvBOB-e~Qf|cjq>6JO?jZhfn=1yL;+hmpbQjlWA(+{A) zodYta@+%_)`#Hk*9YH^M7js^z*Y0_Asx=HE)uu3xL^m_#!@sSKT^ZKOdpcu@bcq|# z!CA$<1_}GnEhYpTWk<~0!JI+g33d|TS@0H{X?Q@OC$l;Z=IL&xC6`BBX1Qh6YjeVVjCXK8)+)!dehpBR7DkFNx*x#RsnUaN;N=1x|bfNeSM`s82@XHf6vKPierH3it6d z_K+O+5!ecVoN7N=xVpYs2d|E@QCqC*o&4(zjO+638Zru#kz1wbgw_mkLzhO@$c6KJ zxQ5Hk`N!*>z?p?@5KN#f8|w9HJ4h%9dX#zr6TdwVaI7Zsn2hJMzDZLP%-_!JICj@b zKH8~Bob5TAiJk2YZ1besICwRwHl*D8OMB1#F};7m@tfhu`r(&W(TB7q(CO>Xny_zo z(e!Df(vbhl6$#quwzc@%P!%G3#6XTJiVxzaWl;vm%Wg- z-i5UgAglMM4Da9v`3?tH6cgI>$N78KU$amZYNJaHOj!QkQL;oax8U8@v=_pdvo_( zZ!&+8WhJa&Tye|6h+THc{rRj3^*Ar>q&Ltal`aZ_+xm+P9l6{X?}^Ir6BEm+DWagk zj#oOS@ArHv1z&H>*=ja} z;kw0vML7-Dap<$?b15hDgo-WVvoXz$T~ z-qyMdDxq{dU_-dg$R&Y z1OW+3TQ!9^4cApM2T46ey^179qnkfGp1TV9T9`bf`tg(I$UWMItN7TF$&n_!L zL0&}98V48=nnl2Og=b?du%nK;F`{hb{q z*cx>v&`!$r8Mz!YOU~>-%cO9y`V_tqjCxEF?Y!02E)NZA`<=R#Tom0CYhRr* z*RhWo$+3y1OGs|Oh>K`cGMjN4L+u2;QOE0zU$&kaq8_vv%?hcIWpCr2=s`o8);wAU2EC)9h>Zp_}z zdH<4d&JDwAaSN0d{rSADL1>qgGnx}HTUv{4>SZUsOsZM05HFP%x)HK-Qlcp#d`Bi?5j_CqQ(6psx6^ut~7$jsrWhGrZjMoUjUCHkUGTO(&n_ftQL z1QM%w`Q|``?M^dY%uT^a?@iS=;TwkPZO={NQI(PTr|YJ0>RVAfC%|gpcR)EBNPEPc za)dE@i2N~PHTs)#vxH)nkw4gq>RA@5TsxTo2WIsgV;|CQyG@pT~pIF;s11a}iQh=yrUO#?HJ%-BwMV@oV72qj})| z#x(w%lY<8t-&D53Gf@?a=J2x6JqeEdhtn$}vTfM6UEKj<7Df$$J21X#S0XXjU?V}A zb){=qn2TCFM(HviF2Gm&x={nsF3v?EYD)J@=^T?A+oPiQD*t4bq{_#O(uLsg$A^G^ z)lE_bQ!pdEcng0E3Tv`$UN4)-9|RUMQXcvW4r5)tTz#gHDJ_Lg`T}39G+q!qOY)B* zHz7@@K_L=r51!3B&V3gE98|PB9ErrRtM#K=5e5$_!Pr7ann{}r5numm8yC8fu?NXV zC%hH)5M^^Ij+`x+{VqcCS>^j}?scw(=H0`>dqh)p1kGc$-H%;!;V&{SY60JJ;+oJs z%6WMCeqHErYiv)QzH@#ZuEz1#sN9{Qgm!_p0@j3A!$1Cxaf}&J|0GTE3?GUI5ldYz znI?N6N+udWi6d|g(YMQoGrQxAOF%yaAp|I**3;T!TL)F9rykDsO^;A776ErB&MDpC zwA&*;Y9C9wcDeXW4eh^l1*%;nHjX8yZ+~TV!9!N-Bzq75oGzuTBfA!hbOvts+}xdU zZ`|Yxz$doarGQqQn$^}a%jH?rzA6D$Vyv?Jqbqw=nR`8_B5#q|}Y+(w64# zpA<&)w%rPG#}BG<^Nv``ZM0fR8#dPGu70j* zs71;n`%y~1Zp|(4F4rXIp(vWU#u&dhO=mnK;TF*bUY89HycOdoa4%<{If5+~W;*#6 z$ueAR$BBrm(&3zRDRZD4QnuY;ONsYUf!mv=nFRalFE#yu=?Ke^&C<|x zbEIg3uoU8Z|1`vSvYFbvV~SzX7Y6oReXPnA6m6c~v!cZYFPxe*Q<(4Ws&kaUa~Cpw z!=&#gzxn7cJ|q_?{OJZlH0QLQwoVz!&qEOyr}llrp#8y%twn@(Kqn}0cR-a@ziZfO zyt!JG0|q?y;P;yM=_(2c$Vt9NZa%91{*Zv!i}NV2rQN;oIsc~W>6wf0`#}UMk1JV1 zQgT!269(p5tr@P`{dT)2$)+R5{Yhg9Cs%8Ge>Y@qD~sOO2+FQ|-#GqgqVr^}m||bu zYOMIU?WeQFyGOubO2Kd!BSo((cgly`MJyi96`O$ilG}0%OSa&Mq)LCk`12p-^)ETIgIW%)HNYnmQ0@+MTN^ z1b%deu_v%__rDDk5Z_cl1W*nH)QX>0>hG-3tE0S_l$RPA5!YjHl)dT?>EqdL z1^tx69m+H0Pr4LPYdjKh6m_jXhg-i8$UC}j6_Z7j{MduE5~Qoe-R#43zm}T1gJxd& zX{H2BEiGTGult=G(WL?&<$OLiDtf9)=!BC!X?C~awQkIG;gRoC}I#6Imj*RUT zmC|HX?J`YZ0j>U`zr7sBf&L=dlG?^JqNc!4c|uQ z)Z{n5b%EKL^sz%C}qg z*_|f*-;q`%pM5}XQMA=X&;Q!Xxpri*9S|7&{qjN*(tNb$I^rwdhD`QFw<8U}I~nyE z7IT@W(-D}ttxVOq`)7M7rMYbk}tly)f@uc4D2fdDs z+UK7bSXVpP@9EvtVln4~T%IjoUMJVKc$zrym%;;nLGWRIT}FQ4KX2xYyz;Kza4$yT z#D}svP_aKmC=|l0=;x*U%GxCf783gYIghd$z`2P2k7^49H)u*Zu z*>MP-XriII4j?YV>+bF9We2mf_k!>X3Gv#xdco{GJ)IbN9Xzak>==2iZM;G0yf#iA zHr_7wFgrg+UN0w@El6^)w()R>+%N0s@H)8gjA%!s7hk`HZU|C=T#A`Ipu9{^JSjh median(AA), 1, 0)) + + var = + RVAR( + data = Data, + p = 2, + regime = 'reg', + horizon = 10, + freq = 'month', + type = 'both', + structure = NA) + + expect_true(is.list(var)) + expect_true(is.list(var$model)) + expect_true(is.list(var$forecasts)) + expect_true(is.list(var$residuals)) + + var.irf = IRF(var, bootstrap.num = 10) + + expect_true(is.list(var.irf)) + + # estimate VAR with short-run restrictions + var.short = + RVAR( + data = Data, + p = 2, + regime = 'reg', + horizon = 10, + freq = 'month', + type = 'both', + 
structure = 'short') + + expect_true(is.list(var.short)) + expect_true(is.list(var.short$model)) + expect_true(is.list(var.short$forecasts)) + expect_true(is.list(var.short$residuals)) + + var.irf = IRF(var.short, bootstrap.num = 10) + var.hd = HD(var.short) + var.fevd = FEVD(var.short) + + expect_true(is.list(var.irf)) + expect_true(is.data.frame(var.hd)) + expect_true(is.list(var.fevd)) + + # estimate VAR with external instrument + var.iv = + RVAR( + data = Data, + p = 2, + regime = 'reg', + horizon = 10, + freq = 'month', + type = 'both', + structure = 'IV', + instrument = 'inst', + instrumented = 'AA') + + expect_true(is.list(var.iv)) + expect_true(is.list(var.iv$model)) + expect_true(is.list(var.iv$forecasts)) + expect_true(is.list(var.iv$residuals)) + + var.irf = IRF(var.iv, bootstrap.num = 10) + + expect_true(is.list(var.irf)) + + + # estimate VAR with external instrument and short-run restrictions + var.iv_short = + RVAR( + data = Data, + p = 2, + regime = 'reg', + horizon = 10, + freq = 'month', + type = 'both', + structure = 'IV-short', + instrument = 'inst', + instrumented = 'AA') + + expect_true(is.list(var.iv_short)) + expect_true(is.list(var.iv_short$model)) + expect_true(is.list(var.iv_short$forecasts)) + expect_true(is.list(var.iv_short$residuals)) + + var.irf = IRF(var.iv_short, bootstrap.num = 10) + var.hd = HD(var.iv_short) + var.fevd = FEVD(var.iv_short) + + expect_true(is.list(var.irf)) + expect_true(is.data.frame(var.hd)) + expect_true(is.list(var.fevd)) + + +}) diff --git a/tests/testthat/test-var.R b/tests/testthat/test-var.R index d1e149e..47e467c 100644 --- a/tests/testthat/test-var.R +++ b/tests/testthat/test-var.R @@ -8,7 +8,7 @@ test_that("VAR workflow", { Data = data.frame(date = date, AA, BB, CC) - # estimate VAR (with lag selection) + # estimate VAR (without lag selection) var = VAR( data = Data, @@ -51,7 +51,7 @@ test_that("VAR workflow", { irf = var_irf( var, - bootstraps.num = 10, + bootstrap.num = 10, CI = c(0.05,0.95)) expect_true(is.data.frame(irf)) diff --git a/vignettes/getting_started.Rmd b/vignettes/getting_started.Rmd index 8dd02bc..b7c99e3 100644 --- a/vignettes/getting_started.Rmd +++ b/vignettes/getting_started.Rmd @@ -160,7 +160,7 @@ Second we estimate the associate impulse response functions, bootstrapped confid var.irf = sovereign::var_irf( var, - bootstraps.num = 10, + bootstrap.num = 10, CI = c(0.05,0.95)) # plot IRF @@ -225,7 +225,7 @@ rvar.irf = sovereign::rvar_irf( rvar, horizon = 10, - bootstraps.num = 10, + bootstrap.num = 10, CI = c(0.05,0.95)) # plot IRF
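The vignette hunks above only rename the bootstrap argument; the regime-dependent structural workflow they feed into is the one exercised by the tests earlier in this patch. For orientation, a hedged end-to-end sketch follows; it is not taken from the package sources, and it assumes `Data` carries a 'date' column, endogenous series AA, BB, and CC, an external instrument column 'inst', and a 0/1 regime column 'reg', mirroring the test fixtures.

# illustrative sketch only; columns AA, BB, CC, 'inst', and 'reg' are assumed
library(sovereign)

# regime-dependent Proxy-SVAR with short-run restrictions ('IV-short'),
# instrumenting AA with the external series 'inst'
rvar =
  sovereign::RVAR(
    data = Data,
    p = 2,
    regime = 'reg',
    horizon = 10,
    freq = 'month',
    type = 'both',
    structure = 'IV-short',
    instrument = 'inst',
    instrumented = 'AA')

# analysis wrappers dispatch on the model class and its declared structure;
# regime-dependent models return one result per regime
rvar.irf  = sovereign::IRF(rvar, bootstrap.num = 100, CI = c(0.05, 0.95))
rvar.fevd = sovereign::FEVD(rvar)
rvar.hd   = sovereign::HD(rvar)

The sketch uses 'IV-short' rather than 'IV' because, as documented above, historical decompositions are not available for models using the pure 'IV' structure.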