diff --git a/.Rbuildignore b/.Rbuildignore index 044403a4..9e0198f1 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -15,3 +15,4 @@ ^cran-comments\.md$ ^CRAN-SUBMISSION$ ^revdep$ +^python$ \ No newline at end of file diff --git a/.github/workflows/py-conda-check.yaml b/.github/workflows/py-conda-check.yaml new file mode 100644 index 00000000..12b55734 --- /dev/null +++ b/.github/workflows/py-conda-check.yaml @@ -0,0 +1,72 @@ +on: + push: + branches: ['feature/python', 'feature/py-*'] # change to [main, master, 'hotfix/*', 'release/*', develop, 'feature/py-*'] after finish +# pull_request: +# branches: [main, master, 'hotfix/*', 'release/*', develop] + +name: py-conda-check + +jobs: + build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ['3.10', '3.11', '3.12'] + + defaults: + run: + working-directory: python + + name: ${{ matrix.os }} (${{ matrix.python-version }}) + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Set LLVM on macOS + if: runner.os == 'macOS' + run: | + brew update + brew install llvm libomp + LLVM_LOC=$(brew --prefix llvm) + OMP_LOC=$(brew --prefix libomp) + echo "LLVM_LOC=$LLVM_LOC" >> $GITHUB_ENV + echo "OMP_LOC=$OMP_LOC" >> $GITHUB_ENV + echo "CC=$LLVM_LOC/bin/clang" >> $GITHUB_ENV + echo "CXX=$LLVM_LOC/bin/clang++" >> $GITHUB_ENV + echo "CPPFLAGS=-I$LLVM_LOC/include -I$OMP_LOC/include" >> $GITHUB_ENV + echo "LDFLAGS=-L$LLVM_LOC/lib -L$OMP_LOC/lib" >> $GITHUB_ENV + + - uses: conda-incubator/setup-miniconda@v3 + with: + miniforge-variant: Miniforge3 + activate-environment: bvhar-env + environment-file: python/requirements/environment.yml + python-version: ${{ matrix.python-version }} + auto-activate-base: false + + - name: Verify conda + run: | + conda info + conda list + conda env list + + - name: Install + run: conda run -n bvhar-env pip install -e . 
-v + + - name: Verify installation + run: | + conda run -n bvhar-env pip list + conda run -n bvhar-env conda list + + - name: OpenMP check + run: conda run -n bvhar-env python -c "from bvhar.utils import checkomp; checkomp.check_omp()" + + - name: Test + run: conda run -n bvhar-env pytest diff --git a/.github/workflows/py-github-check.yaml b/.github/workflows/py-github-check.yaml new file mode 100644 index 00000000..fe0ddb3b --- /dev/null +++ b/.github/workflows/py-github-check.yaml @@ -0,0 +1,123 @@ +on: + push: + branches: ['feature/python', 'feature/py-*'] # change to [main, master, 'hotfix/*', 'release/*', develop, 'feature/py-*'] after finish + # pull_request: + # branches: [main, master, 'hotfix/*', 'release/*', develop] + +name: py-github-check + +jobs: + build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ['3.10', '3.11', '3.12'] + + name: ${{ matrix.os }} (${{ matrix.python-version }}) + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Verify git + run: git --version + + - name: Set system dependencies on Linux + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y libboost-all-dev libeigen3-dev + echo $(sudo apt list --installed) + echo "EIGEN_INCLUDE_DIR=/usr/include/eigen3" >> $GITHUB_ENV + echo "BOOST_INCLUDE_DIR=/usr/include/boost" >> $GITHUB_ENV + + - name: Set system dependencies on macOS + if: runner.os == 'macOS' + run: | + brew update + brew install llvm libomp boost eigen + EIGEN_LOC=$(brew --prefix eigen) + BOOST_LOC=$(brew --prefix boost) + LLVM_LOC=$(brew --prefix llvm) + OMP_LOC=$(brew --prefix libomp) + echo "EIGEN_INCLUDE_DIR=$EIGEN_LOC/include/eigen3" >> $GITHUB_ENV + echo "BOOST_INCLUDE_DIR=$BOOST_LOC/include" >> $GITHUB_ENV + echo "LLVM_LOC=$LLVM_LOC" >> $GITHUB_ENV + echo "OMP_LOC=$OMP_LOC" >> $GITHUB_ENV + echo 
"CC=$LLVM_LOC/bin/clang" >> $GITHUB_ENV + echo "CXX=$LLVM_LOC/bin/clang++" >> $GITHUB_ENV + echo "CPPFLAGS=-I$LLVM_LOC/include -I$OMP_LOC/include" >> $GITHUB_ENV + echo "LDFLAGS=-L$LLVM_LOC/lib -L$OMP_LOC/lib" >> $GITHUB_ENV + + - name: Set system dependencies on Windows + if: runner.os == 'Windows' + run: | + choco install eigen -y --no-progress + choco install boost-msvc-14.3 -y --no-progress + $eigenPath = ( + Get-ChildItem -Path "C:\ProgramData\chocolatey\lib\eigen" -Recurse -Filter "Eigen" | + Select-Object -First 1 + ).Parent.FullName + if ($eigenPath) { + echo "eigen is installed in $eigenPath" + } else { + Write-Error "Wrong eigen path" + exit 1 + } + $boostPath = $null + $boostCand = @("C:\local", "C:\ProgramData\chocolatey\lib") + foreach ($cand in $boostCand) { + $isPath = ( + Get-ChildItem -Path $cand -Directory | + Where-Object { $_.Name -match "boost" } | + Sort-Object LastWriteTime -Descending | + Select-Object -First 1 + ).FullName + if ($isPath) { + $boostPath = $isPath + break + } + } + if ($boostPath) { + echo "boost is installed in $boostPath" + } else { + Write-Error "Wrong boost path" + exit 1 + } + echo "EIGEN_INCLUDE_DIR=$eigenPath" >> $Env:GITHUB_ENV + echo "BOOST_INCLUDE_DIR=$boostPath" >> $Env:GITHUB_ENV + + - name: Verify Eigen and boost on non-Windows + if: runner.os != 'Windows' + run: | + ls ${{ env.EIGEN_INCLUDE_DIR }} + ls ${{ env.BOOST_INCLUDE_DIR }} + mkdir temp-dir + + - name: Verify Eigen and boost on Windows + if: runner.os == 'Windows' + run: | + dir ${{ env.EIGEN_INCLUDE_DIR }} + dir ${{ env.BOOST_INCLUDE_DIR }} + New-Item -ItemType Directory -Force -Path temp-dir + + - name: Install package from github + # change branch after finishing the feature + run: | + pip install --upgrade pip + python -m pip install -e 'git+https://github.com/ygeunkim/bvhar.git@feature/python#egg=bvhar&subdirectory=python' + working-directory: temp-dir + + - name: Verify installation + run: pip list + working-directory: temp-dir + + - name: OpenMP 
check + run: python -c "from bvhar.utils import checkomp; checkomp.check_omp()" + working-directory: temp-dir diff --git a/.github/workflows/py-pip-check.yaml b/.github/workflows/py-pip-check.yaml new file mode 100644 index 00000000..509db027 --- /dev/null +++ b/.github/workflows/py-pip-check.yaml @@ -0,0 +1,127 @@ +on: + push: + branches: ['feature/python', 'feature/py-*'] # change to [main, master, 'hotfix/*', 'release/*', develop, 'feature/py-*'] after finish + # pull_request: + # branches: [main, master, 'hotfix/*', 'release/*', develop] + +name: py-pip-check + +jobs: + build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ['3.10', '3.11', '3.12'] + + defaults: + run: + working-directory: python + + name: ${{ matrix.os }} (${{ matrix.python-version }}) + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Set system dependencies on Linux + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y libboost-all-dev libeigen3-dev + echo $(sudo apt list --installed) + echo "EIGEN_INCLUDE_DIR=/usr/include/eigen3" >> $GITHUB_ENV + echo "BOOST_INCLUDE_DIR=/usr/include/boost" >> $GITHUB_ENV + + - name: Set system dependencies on macOS + if: runner.os == 'macOS' + run: | + brew update + brew install llvm libomp boost eigen + EIGEN_LOC=$(brew --prefix eigen) + BOOST_LOC=$(brew --prefix boost) + LLVM_LOC=$(brew --prefix llvm) + OMP_LOC=$(brew --prefix libomp) + echo "EIGEN_INCLUDE_DIR=$EIGEN_LOC/include/eigen3" >> $GITHUB_ENV + echo "BOOST_INCLUDE_DIR=$BOOST_LOC/include" >> $GITHUB_ENV + echo "LLVM_LOC=$LLVM_LOC" >> $GITHUB_ENV + echo "OMP_LOC=$OMP_LOC" >> $GITHUB_ENV + echo "CC=$LLVM_LOC/bin/clang" >> $GITHUB_ENV + echo "CXX=$LLVM_LOC/bin/clang++" >> $GITHUB_ENV + echo "CPPFLAGS=-I$LLVM_LOC/include -I$OMP_LOC/include" >> $GITHUB_ENV + echo "LDFLAGS=-L$LLVM_LOC/lib 
-L$OMP_LOC/lib" >> $GITHUB_ENV + + - name: Set system dependencies on Windows + if: runner.os == 'Windows' + run: | + choco install eigen -y --no-progress + choco install boost-msvc-14.3 -y --no-progress + $eigenPath = ( + Get-ChildItem -Path "C:\ProgramData\chocolatey\lib\eigen" -Recurse -Filter "Eigen" | + Select-Object -First 1 + ).Parent.FullName + if ($eigenPath) { + echo "eigen is installed in $eigenPath" + } else { + Write-Error "Wrong eigen path" + exit 1 + } + $boostPath = $null + $boostCand = @("C:\local", "C:\ProgramData\chocolatey\lib") + foreach ($cand in $boostCand) { + $isPath = ( + Get-ChildItem -Path $cand -Directory | + Where-Object { $_.Name -match "boost" } | + Sort-Object LastWriteTime -Descending | + Select-Object -First 1 + ).FullName + if ($isPath) { + $boostPath = $isPath + break + } + } + if ($boostPath) { + echo "boost is installed in $boostPath" + } else { + Write-Error "Wrong boost path" + exit 1 + } + echo "EIGEN_INCLUDE_DIR=$eigenPath" >> $Env:GITHUB_ENV + echo "BOOST_INCLUDE_DIR=$boostPath" >> $Env:GITHUB_ENV + + - name: Verify Eigen and boost on non-Windows + if: runner.os != 'Windows' + run: | + ls ${{ env.EIGEN_INCLUDE_DIR }} + ls ${{ env.BOOST_INCLUDE_DIR }} + + - name: Verify Eigen and boost on Windows + if: runner.os == 'Windows' + run: | + dir ${{ env.EIGEN_INCLUDE_DIR }} + dir ${{ env.BOOST_INCLUDE_DIR }} + + - name: Install requirements + run: | + pip install --upgrade pip + pip install -r requirements/requirements.txt + + - name: Verify pip + run: pip list + + - name: Install + run: pip install -e . 
-v + + - name: Verify installation + run: pip list + + - name: OpenMP check + run: python -c "from bvhar.utils import checkomp; checkomp.check_omp()" + + - name: Test + run: pytest diff --git a/.github/workflows/py-test-coverage.yaml b/.github/workflows/py-test-coverage.yaml new file mode 100644 index 00000000..ee4c44c8 --- /dev/null +++ b/.github/workflows/py-test-coverage.yaml @@ -0,0 +1,51 @@ +on: + push: + branches: ['feature/python', 'feature/py-*'] # change to [main, master, 'hotfix/*', 'release/*', develop, 'feature/py-*'] after finish + # pull_request: + # branches: [main, master, 'hotfix/*', 'release/*', develop] + +name: py-test-coverage + +jobs: + py-test-coverage: + runs-on: ubuntu-latest + + defaults: + run: + working-directory: python + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - uses: conda-incubator/setup-miniconda@v3 + with: + miniforge-variant: Miniforge3 + activate-environment: bvhar-dev + environment-file: python/requirements/environment-dev.yml + python-version: '3.11' + auto-activate-base: false + + - name: Install dependencies + run: conda run -n bvhar-dev pip install -e . -v + + - name: Test coverage + run: conda run -n bvhar-dev pytest --cov=./ --cov-report=xml | tee pytest_output.txt + + - uses: codecov/codecov-action@v4 + with: + fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} + working-directory: python + codecov_yml_path: ../codecov.yml + flags: python + + - name: Show pytest output + if: always() + run: | + ## -------------------------------------------------------------------- + find . 
-name 'pytest_output.txt' -exec cat '{}' \; || true + shell: bash diff --git a/.github/workflows/py-wheel-check.yaml b/.github/workflows/py-wheel-check.yaml new file mode 100644 index 00000000..e433728e --- /dev/null +++ b/.github/workflows/py-wheel-check.yaml @@ -0,0 +1,139 @@ +on: + push: + branches: ['feature/python', 'feature/py-*'] # change to [main, master, 'hotfix/*', 'release/*', develop, 'feature/py-*'] after finish + # pull_request: + # branches: [main, master, 'hotfix/*', 'release/*', develop] + +name: py-wheel-check + +jobs: + build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ['3.10', '3.11', '3.12'] + + defaults: + run: + working-directory: python + + name: ${{ matrix.os }} (${{ matrix.python-version }}) + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Set system dependencies on Linux + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y libboost-all-dev libeigen3-dev + echo $(sudo apt list --installed) + echo "EIGEN_INCLUDE_DIR=/usr/include/eigen3" >> $GITHUB_ENV + echo "BOOST_INCLUDE_DIR=/usr/include/boost" >> $GITHUB_ENV + + - name: Set system dependencies on macOS + if: runner.os == 'macOS' + run: | + brew update + brew install llvm libomp boost eigen + EIGEN_LOC=$(brew --prefix eigen) + BOOST_LOC=$(brew --prefix boost) + LLVM_LOC=$(brew --prefix llvm) + OMP_LOC=$(brew --prefix libomp) + echo "EIGEN_INCLUDE_DIR=$EIGEN_LOC/include/eigen3" >> $GITHUB_ENV + echo "BOOST_INCLUDE_DIR=$BOOST_LOC/include" >> $GITHUB_ENV + echo "LLVM_LOC=$LLVM_LOC" >> $GITHUB_ENV + echo "OMP_LOC=$OMP_LOC" >> $GITHUB_ENV + echo "CC=$LLVM_LOC/bin/clang" >> $GITHUB_ENV + echo "CXX=$LLVM_LOC/bin/clang++" >> $GITHUB_ENV + echo "CPPFLAGS=-I$LLVM_LOC/include -I$OMP_LOC/include" >> $GITHUB_ENV + echo "LDFLAGS=-L$LLVM_LOC/lib -L$OMP_LOC/lib" >> $GITHUB_ENV + + - 
name: Set system dependencies on Windows + if: runner.os == 'Windows' + run: | + choco install eigen -y --no-progress + choco install boost-msvc-14.3 -y --no-progress + $eigenPath = ( + Get-ChildItem -Path "C:\ProgramData\chocolatey\lib\eigen" -Recurse -Filter "Eigen" | + Select-Object -First 1 + ).Parent.FullName + if ($eigenPath) { + echo "eigen is installed in $eigenPath" + } else { + Write-Error "Wrong eigen path" + exit 1 + } + $boostPath = $null + $boostCand = @("C:\local", "C:\ProgramData\chocolatey\lib") + foreach ($cand in $boostCand) { + $isPath = ( + Get-ChildItem -Path $cand -Directory | + Where-Object { $_.Name -match "boost" } | + Sort-Object LastWriteTime -Descending | + Select-Object -First 1 + ).FullName + if ($isPath) { + $boostPath = $isPath + break + } + } + if ($boostPath) { + echo "boost is installed in $boostPath" + } else { + Write-Error "Wrong boost path" + exit 1 + } + echo "EIGEN_INCLUDE_DIR=$eigenPath" >> $Env:GITHUB_ENV + echo "BOOST_INCLUDE_DIR=$boostPath" >> $Env:GITHUB_ENV + + - name: Verify Eigen and boost on non-Windows + if: runner.os != 'Windows' + run: | + ls ${{ env.EIGEN_INCLUDE_DIR }} + ls ${{ env.BOOST_INCLUDE_DIR }} + + - name: Verify Eigen and boost on Windows + if: runner.os == 'Windows' + run: | + dir ${{ env.EIGEN_INCLUDE_DIR }} + dir ${{ env.BOOST_INCLUDE_DIR }} + + - name: Install requirements + run: | + pip install --upgrade pip + pip install -r requirements/requirements.txt + + - name: Verify pip + run: pip list + + - name: Build wheel + run: | + pip install build + python -m build --wheel . 
+ + - name: Install wheel on non-Windows + if: runner.os != 'Windows' + run: pip install dist/*.whl + + - name: Install wheel on Windows + if: runner.os == 'Windows' + run: | + $whl = (Get-ChildItem -Path dist -Filter "*.whl" | Select-Object -First 1).FullName + pip install $whl + + - name: Verify installation + run: pip list + + - name: OpenMP check + run: python -c "from bvhar.utils import checkomp; checkomp.check_omp()" + + - name: Test + run: pytest diff --git a/.github/workflows/quartodoc.yaml b/.github/workflows/quartodoc.yaml new file mode 100644 index 00000000..6af66f8b --- /dev/null +++ b/.github/workflows/quartodoc.yaml @@ -0,0 +1,57 @@ +on: + push: + branches: ['feature/python'] # change to [main, master, develop] after finish +# pull_request: +# branches: [main, master] +# workflow_dispatch: + +name: quartodoc + +jobs: + quartodoc: + runs-on: ubuntu-latest + + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + + permissions: + contents: write + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - uses: conda-incubator/setup-miniconda@v3 + with: + miniforge-variant: Miniforge3 + activate-environment: bvhar-dev + environment-file: python/requirements/environment-dev.yml + python-version: '3.11' + auto-activate-base: false + + - name: Install + run: conda run -n bvhar-dev pip install -e . -v + working-directory: python + + - name: Set up Quarto + uses: quarto-dev/quarto-actions/setup@v2 + + - name: Build site + run: | + conda run -n bvhar-dev quartodoc build + conda run -n bvhar-dev quarto render . 
+ working-directory: python/docs + + - name: Deploy to GitHub pages 🚀 + if: github.event_name != 'pull_request' + uses: JamesIves/github-pages-deploy-action@v4.6.1 + with: + token: ${{ secrets.YGEUNKIM_PAGE_PAT }} + repository-name: ygeunkim/ygeunkim.github.io + clean: false + branch: master + folder: python/docs/_site + target-folder: package/bvhar/python diff --git a/codecov.yml b/codecov.yml index 0459bdad..eca443f2 100644 --- a/codecov.yml +++ b/codecov.yml @@ -15,3 +15,12 @@ coverage: codecov: token: token + +flags: + r-package: + default: true + paths: + - . + python: + paths: + - python \ No newline at end of file diff --git a/inst/include/bvharcommon.h b/inst/include/bvharcommon.h index 63112777..a77eaefc 100644 --- a/inst/include/bvharcommon.h +++ b/inst/include/bvharcommon.h @@ -1,7 +1,7 @@ #ifndef BVHARCOMMON_H #define BVHARCOMMON_H -#include +#include "commondefs.h" #include #include #include @@ -48,16 +48,16 @@ inline double gammafn(double x) { inline double mgammafn(double x, int p) { if (p < 1) { - Rcpp::stop("'p' should be larger than or same as 1."); + STOP("'p' should be larger than or same as 1."); } if (x <= 0) { - Rcpp::stop("'x' should be larger than 0."); + STOP("'x' should be larger than 0."); } if (p == 1) { return gammafn(x); } if (2 * x < p) { - Rcpp::stop("'x / 2' should be larger than 'p'."); + STOP("'x / 2' should be larger than 'p'."); } double res = pow(M_PI, p * (p - 1) / 4.0); for (int i = 0; i < p; i++) { @@ -111,13 +111,13 @@ inline double lmgammafn(double x, int p) { // @param lg If true, return log(f) inline double invgamma_dens(double x, double shp, double scl, bool lg) { if (x < 0 ) { - Rcpp::stop("'x' should be larger than 0."); + STOP("'x' should be larger than 0."); } if (shp <= 0 ) { - Rcpp::stop("'shp' should be larger than 0."); + STOP("'shp' should be larger than 0."); } if (scl <= 0 ) { - Rcpp::stop("'scl' should be larger than 0."); + STOP("'scl' should be larger than 0."); } double res = pow(scl, shp) * pow(x, -shp 
- 1) * exp(-scl / x) / bvhar::gammafn(shp); if (lg) { @@ -127,28 +127,38 @@ inline double invgamma_dens(double x, double shp, double scl, bool lg) { } // RNG---------------------------------------- +#ifdef USE_RCPP inline double bindom_rand(int n, double prob) { return Rf_rbinom(n, prob); } +inline double chisq_rand(double df) { + return Rf_rchisq(df); +} + +inline double gamma_rand(double shp, double scl) { + return Rf_rgamma(shp, scl); // 2nd: scale +} + +inline double unif_rand(double min, double max) { + return Rf_runif(min, max); +} + +inline double beta_rand(double s1, double s2) { + return Rf_rbeta(s1, s2); +} +#endif + inline double normal_rand(boost::random::mt19937& rng) { boost::random::normal_distribution<> rdist(0.0, 1.0); return rdist(rng); } -inline double chisq_rand(double df) { - return Rf_rchisq(df); -} - inline double chisq_rand(double df, boost::random::mt19937& rng) { boost::random::chi_squared_distribution<> rdist(df); return rdist(rng); } -inline double gamma_rand(double shp, double scl) { - return Rf_rgamma(shp, scl); // 2nd: scale -} - inline double gamma_rand(double shp, double scl, boost::random::mt19937& rng) { boost::random::gamma_distribution<> rdist(shp, scl); // 2nd: scale return rdist(rng); @@ -159,19 +169,11 @@ inline double ber_rand(double prob, boost::random::mt19937& rng) { return rdist(rng) * 1.0; // change to int later: now just use double to match Rf_rbinom } -inline double unif_rand(double min, double max) { - return Rf_runif(min, max); -} - inline double unif_rand(double min, double max, boost::random::mt19937& rng) { boost::random::uniform_real_distribution<> rdist(min, max); return rdist(rng); } -inline double beta_rand(double s1, double s2) { - return Rf_rbeta(s1, s2); -} - inline double beta_rand(double s1, double s2, boost::random::mt19937& rng) { boost::random::beta_distribution<> rdist(s1, s2); return rdist(rng); diff --git a/inst/include/bvhardesign.h b/inst/include/bvhardesign.h index 469ba018..0db226e3 100644 --- 
a/inst/include/bvhardesign.h +++ b/inst/include/bvhardesign.h @@ -1,7 +1,7 @@ #ifndef BVHARDESIGN_H #define BVHARDESIGN_H -#include +#include "commondefs.h" namespace bvhar { diff --git a/inst/include/bvhardraw.h b/inst/include/bvhardraw.h index 85978f81..3726a602 100644 --- a/inst/include/bvhardraw.h +++ b/inst/include/bvhardraw.h @@ -28,14 +28,14 @@ struct RegParams { RegParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& spec, Rcpp::List& intercept, + LIST& spec, LIST& intercept, bool include_mean ) : _iter(num_iter), _x(x), _y(y), - _sig_shp(Rcpp::as(spec["shape"])), - _sig_scl(Rcpp::as(spec["scale"])), - _mean_non(Rcpp::as(intercept["mean_non"])), - _sd_non(intercept["sd_non"]), _mean(include_mean) {} + _sig_shp(CAST(spec["shape"])), + _sig_scl(CAST(spec["scale"])), + _mean_non(CAST(intercept["mean_non"])), + _sd_non(CAST_DOUBLE(intercept["sd_non"])), _mean(include_mean) {} }; struct RegInits { @@ -48,9 +48,10 @@ struct RegInits { int num_lowerchol = dim * (dim - 1) / 2; _contem = .001 * Eigen::VectorXd::Zero(num_lowerchol); } - RegInits(Rcpp::List& init) - : _coef(Rcpp::as(init["init_coef"])), - _contem(Rcpp::as(init["init_contem"])) {} + + RegInits(LIST& init) + : _coef(CAST(init["init_coef"])), + _contem(CAST(init["init_contem"])) {} }; struct RegRecords { @@ -290,6 +291,7 @@ inline Eigen::MatrixXd build_inv_lower(int dim, Eigen::VectorXd lower_vec) { return res; } +#ifdef USE_RCPP // Generating the Diagonal Component of Cholesky Factor in SSVS Gibbs Sampler // // In MCMC process of SSVS, this function generates the diagonal component \eqn{\Psi} from variance matrix @@ -414,6 +416,7 @@ inline void ssvs_coef(Eigen::VectorXd& coef, Eigen::VectorXd& prior_mean, Eigen: Eigen::MatrixXd normal_mean = llt_sig.solve(scaled_xtx * coef_ols + prior_prec * prior_mean); coef = normal_mean + llt_sig.matrixU().solve(standard_normal); } +#endif // Generating Dummy Vector for Parameters in SSVS Gibbs Sampler // @@ -539,7 +542,7 @@ inline 
void draw_coef(Eigen::Ref coef, Eigen::Ref() ); if (lltOfscale.info() == Eigen::NumericalIssue) { - Rcpp::stop("LLT error"); + STOP("LLT error"); } Eigen::VectorXd post_mean = lltOfscale.solve(prior_prec.cwiseProduct(prior_mean) + x.transpose() * y); coef = post_mean + lltOfscale.matrixU().solve(res); @@ -671,6 +674,7 @@ inline void varsv_h0(Eigen::VectorXd& h0, Eigen::VectorXd& prior_mean, Eigen::Ma h0 = post_mean + lltOfscale.matrixU().solve(res); } +#ifdef USE_RCPP // Building a Inverse Diagonal Matrix by Global and Local Hyperparameters // // In MCMC process of Horseshoe, this function computes diagonal matrix \eqn{\Lambda_\ast^{-1}} defined by @@ -770,6 +774,7 @@ inline double horseshoe_var(Eigen::VectorXd& response_vec, Eigen::MatrixXd& desi 2 / ((response_vec - design_mat * coef_vec).squaredNorm() + coef_vec.transpose() * shrink_mat * coef_vec), rng ); } +#endif // Generating the Squared Grouped Local Sparsity Hyperparameters Vector in Horseshoe Gibbs Sampler // diff --git a/inst/include/bvharprogress.h b/inst/include/bvharprogress.h index 4ed45c1a..a8becdd0 100644 --- a/inst/include/bvharprogress.h +++ b/inst/include/bvharprogress.h @@ -2,7 +2,17 @@ #define BVHARPROGRESS_H #include "bvharomp.h" -#include +#ifdef USE_RCPP + #include + #define COUT Rcpp::Rcout + #define ENDL "\n" + #define FLUSH Rcpp::Rcout.flush() +#else + #include + #define COUT std::cout + #define ENDL std::endl + #define FLUSH std::cout.flush() +#endif namespace bvhar { @@ -24,18 +34,24 @@ class bvharprogress { return; // not display when verbose is false } int percent = _current * 100 / _total; - Rcpp::Rcout << "\r"; + // Rcpp::Rcout << "\r"; + COUT << "\r"; for (int i = 0; i < _width; i++) { if (i < (percent * _width / 100)) { - Rcpp::Rcout << "#"; + // Rcpp::Rcout << "#"; + std::cout << "#"; } else { - Rcpp::Rcout << " "; + // Rcpp::Rcout << " "; + COUT << " "; } } - Rcpp::Rcout << " " << percent << "%"; - Rcpp::Rcout.flush(); + // Rcpp::Rcout << " " << percent << "%"; + // 
Rcpp::Rcout.flush(); + COUT << " " << percent << "%"; + FLUSH; if (_current >= _total) { - Rcpp::Rcout << "\n"; + // Rcpp::Rcout << "\n"; + COUT << ENDL; } } private: diff --git a/inst/include/bvharsim.h b/inst/include/bvharsim.h index 146788c0..83cd27a6 100644 --- a/inst/include/bvharsim.h +++ b/inst/include/bvharsim.h @@ -5,6 +5,7 @@ #include #include +#ifdef USE_RCPP Eigen::MatrixXd sim_mgaussian(int num_sim, Eigen::VectorXd mu, Eigen::MatrixXd sig); Eigen::MatrixXd sim_mgaussian_chol(int num_sim, Eigen::VectorXd mu, Eigen::MatrixXd sig); @@ -16,9 +17,29 @@ Eigen::MatrixXd sim_mstudent(int num_sim, double df, Eigen::VectorXd mu, Eigen:: Eigen::MatrixXd sim_matgaussian(Eigen::MatrixXd mat_mean, Eigen::MatrixXd mat_scale_u, Eigen::MatrixXd mat_scale_v); Eigen::MatrixXd sim_iw(Eigen::MatrixXd mat_scale, double shape); - +#endif namespace bvhar { +// Log quasi-density of GIG +// +// @param x postivie support +// @param lambda Index of modified Bessel function of third kind. +// @param beta Square of the multiplication of the other two parameters. +inline double dgig_quasi(double x, double lambda, double beta) { + return (lambda - 1) * log(x) - beta * (x + 1 / x) / 2; +} + +// Compute mode of quasi-density of GIG +// @param lambda Index of modified Bessel function of third kind. +// @param beta Square of the multiplication of the other two parameters. 
+inline double dgig_mode(double lambda, double beta) { + if (lambda < 1) { + return beta / (sqrt((1 - lambda) * (1 - lambda) + beta * beta) + 1 - lambda); + } + return (sqrt((lambda - 1) * (lambda - 1) + beta * beta) - 1 + lambda) / beta; +} + +#ifdef USE_RCPP // Generate MN(M, U, V) // @param mat_mean Mean matrix M // @param mat_scale_u First scale matrix U @@ -42,24 +63,6 @@ inline Eigen::MatrixXd sim_mn(const Eigen::MatrixXd& mat_mean, const Eigen::Matr Eigen::MatrixXd chol_scale_u = mat_scale_u.llt().matrixL(); // U = LLT return mat_mean + chol_scale_u * mat_norm * chol_scale_v; // M + L X U_v ~ MN(M, LLT = U, U_vT U_v = V) } -// overloading -inline Eigen::MatrixXd sim_mn(const Eigen::MatrixXd& mat_mean, const Eigen::MatrixXd& mat_scale_u, const Eigen::MatrixXd& mat_scale_v, - bool prec, boost::random::mt19937& rng) { - int num_rows = mat_mean.rows(); - int num_cols = mat_mean.cols(); - Eigen::MatrixXd chol_scale_v = mat_scale_v.llt().matrixU(); // V = U_vTU_v - Eigen::MatrixXd mat_norm(num_rows, num_cols); // standard normal - for (int i = 0; i < num_rows; i++) { - for (int j = 0; j < num_cols; j++) { - mat_norm(i, j) = normal_rand(rng); - } - } - if (prec) { - return mat_mean + mat_scale_u.llt().matrixU().solve(mat_norm * chol_scale_v); // M + U_u^(-1) X U_v ~ MN(M, U_u^(-1) U_u^(-1)T = U, U_vT U_v = V) - } - Eigen::MatrixXd chol_scale_u = mat_scale_u.llt().matrixL(); // U = LLT - return mat_mean + chol_scale_u * mat_norm * chol_scale_v; // M + L X U_v ~ MN(M, LLT = U, U_vT U_v = V) -} // Generate Lower Triangular Matrix of IW // @@ -72,13 +75,13 @@ inline Eigen::MatrixXd sim_mn(const Eigen::MatrixXd& mat_mean, const Eigen::Matr inline Eigen::MatrixXd sim_iw_tri(Eigen::MatrixXd mat_scale, double shape) { int dim = mat_scale.cols(); if (shape <= dim - 1) { - Rcpp::stop("Wrong 'shape'. shape > dim - 1 must be satisfied."); + STOP("Wrong 'shape'. 
shape > dim - 1 must be satisfied."); } if (mat_scale.rows() != mat_scale.cols()) { - Rcpp::stop("Invalid 'mat_scale' dimension."); + STOP("Invalid 'mat_scale' dimension."); } if (dim != mat_scale.rows()) { - Rcpp::stop("Invalid 'mat_scale' dimension."); + STOP("Invalid 'mat_scale' dimension."); } Eigen::MatrixXd mat_bartlett = Eigen::MatrixXd::Zero(dim, dim); // upper triangular bartlett decomposition // generate in row direction @@ -94,31 +97,6 @@ inline Eigen::MatrixXd sim_iw_tri(Eigen::MatrixXd mat_scale, double shape) { // return chol_scale * mat_bartlett.inverse().transpose(); // lower triangular return chol_scale * mat_bartlett.transpose().triangularView().solve(Eigen::MatrixXd::Identity(dim, dim)); // lower triangular } -// overloading -inline Eigen::MatrixXd sim_iw_tri(const Eigen::MatrixXd& mat_scale, double shape, boost::random::mt19937& rng) { - int dim = mat_scale.cols(); - if (shape <= dim - 1) { - Rcpp::stop("Wrong 'shape'. shape > dim - 1 must be satisfied."); - } - if (mat_scale.rows() != mat_scale.cols()) { - Rcpp::stop("Invalid 'mat_scale' dimension."); - } - if (dim != mat_scale.rows()) { - Rcpp::stop("Invalid 'mat_scale' dimension."); - } - Eigen::MatrixXd mat_bartlett = Eigen::MatrixXd::Zero(dim, dim); // upper triangular bartlett decomposition - // generate in row direction - for (int i = 0; i < dim; i++) { - mat_bartlett(i, i) = sqrt(bvhar::chisq_rand(shape - (double)i, rng)); // diagonal: qii^2 ~ chi^2(nu - i + 1) - } - for (int i = 0; i < dim - 1; i ++) { - for (int j = i + 1; j < dim; j++) { - mat_bartlett(i, j) = normal_rand(rng); // upper triangular (j > i) ~ N(0, 1) - } - } - Eigen::MatrixXd chol_scale = mat_scale.llt().matrixL(); - return chol_scale * mat_bartlett.transpose().triangularView().solve(Eigen::MatrixXd::Identity(dim, dim)); // lower triangular -} inline Eigen::MatrixXd sim_inv_wishart(const Eigen::MatrixXd& mat_scale, double shape) { Eigen::MatrixXd chol_res = sim_iw_tri(mat_scale, shape); @@ -142,16 +120,6 @@ inline 
std::vector sim_mn_iw(const Eigen::MatrixXd& mat_mean, c res[1] = mat_scale_v; return res; } -// overloading -inline std::vector sim_mn_iw(const Eigen::MatrixXd& mat_mean, const Eigen::MatrixXd& mat_scale_u, - const Eigen::MatrixXd& mat_scale, double shape, bool prec, boost::random::mt19937& rng) { - Eigen::MatrixXd chol_res = sim_iw_tri(mat_scale, shape, rng); - Eigen::MatrixXd mat_scale_v = chol_res * chol_res.transpose(); - std::vector res(2); - res[0] = sim_mn(mat_mean, mat_scale_u, mat_scale_v, prec, rng); - res[1] = mat_scale_v; - return res; -} // Generate Lower Triangular Matrix of Wishart // @@ -164,13 +132,13 @@ inline std::vector sim_mn_iw(const Eigen::MatrixXd& mat_mean, c inline Eigen::MatrixXd sim_wishart(Eigen::MatrixXd mat_scale, double shape) { int dim = mat_scale.cols(); if (shape <= dim - 1) { - Rcpp::stop("Wrong 'shape'. shape > dim - 1 must be satisfied."); + STOP("Wrong 'shape'. shape > dim - 1 must be satisfied."); } if (mat_scale.rows() != mat_scale.cols()) { - Rcpp::stop("Invalid 'mat_scale' dimension."); + STOP("Invalid 'mat_scale' dimension."); } if (dim != mat_scale.rows()) { - Rcpp::stop("Invalid 'mat_scale' dimension."); + STOP("Invalid 'mat_scale' dimension."); } Eigen::MatrixXd mat_bartlett = Eigen::MatrixXd::Zero(dim, dim); for (int i = 0; i < dim; i++) { @@ -187,25 +155,6 @@ inline Eigen::MatrixXd sim_wishart(Eigen::MatrixXd mat_scale, double shape) { return chol_res * chol_res.transpose(); } -// Log quasi-density of GIG -// -// @param x postivie support -// @param lambda Index of modified Bessel function of third kind. -// @param beta Square of the multiplication of the other two parameters. -inline double dgig_quasi(double x, double lambda, double beta) { - return (lambda - 1) * log(x) - beta * (x + 1 / x) / 2; -} - -// Compute mode of quasi-density of GIG -// @param lambda Index of modified Bessel function of third kind. -// @param beta Square of the multiplication of the other two parameters. 
-inline double dgig_mode(double lambda, double beta) { - if (lambda < 1) { - return beta / (sqrt((1 - lambda) * (1 - lambda) + beta * beta) + 1 - lambda); - } - return (sqrt((lambda - 1) * (lambda - 1) + beta * beta) - 1 + lambda) / beta; -} - // AR-Mehod for non-concave part // // Algorithm 1 of Hormann and Leydold @@ -263,56 +212,6 @@ inline void rgig_nonconcave(Eigen::VectorXd& res, int num_sim, double lambda, do res[i] = cand; } } -// overloading -inline void rgig_nonconcave(Eigen::VectorXd& res, int num_sim, double lambda, double beta, boost::random::mt19937& rng) { - double mode = dgig_mode(lambda, beta); // argmax of g(x) - double x0, xstar, k1, k2, k3, A1, A2, A3; - x0 = beta / (1 - lambda); // subdomain (0, x0) - xstar = std::max(x0, 2 / beta); - k1 = exp(dgig_quasi(mode, lambda, beta)); - A1 = k1 * x0; - if (x0 < 2 / beta) { // subdomain (x0, 2 / beta) - k2 = exp(-beta); - if (lambda == 0) { - A2 = k2 * log(2 / (beta * beta)); - } else { - A2 = k2 * (pow(2 / beta, lambda) - pow(x0, lambda)) / lambda; - } - } else { - k2 = 0; - A2 = 0; - } - k3 = pow(xstar, lambda - 1); // subdomain (xstar, inf) - A3 = 2 * k3 * exp(-xstar * beta / 2) / beta; - double A = A1 + A2 + A3; - bool rejected; - double draw_unif, draw_prop, cand, ar_const; - for (int i = 0; i < num_sim; i++) { - rejected = true; - while (rejected) { - draw_prop = unif_rand(0, A, rng); - if (draw_prop <= A1) { // subdomain (0, x0) - cand = x0 * draw_prop / A1; - ar_const = log(k1); - } else if (draw_prop <= A1 + A2) { // subdomain (x0, 2 / beta) - draw_prop -= A1; - if (lambda == 0) { - cand = beta * exp(draw_prop * exp(beta)); - } else { - cand = pow(pow(x0, lambda) + draw_prop * lambda / k2, 1 / lambda); - } - ar_const = log(k2) + (lambda - 1) * log(cand); - } else { // subdomain (xstar, inf) - draw_prop -= (A1 + A2); - cand = -2 * log(exp(-xstar * beta / 2) - draw_prop * beta / (2 * k3)) / beta; - ar_const = log(k3) - cand * beta / 2; - } - draw_unif = unif_rand(0, 1, rng); - rejected = 
log(draw_unif) + ar_const > dgig_quasi(cand, lambda, beta); - } - res[i] = cand; - } -} // Ratio-of-Uniforms without Mode Shift // @@ -340,25 +239,6 @@ inline void rgig_without_mode(Eigen::VectorXd& res, int num_sim, double lambda, res[i] = cand; } } -// overloading -inline void rgig_without_mode(Eigen::VectorXd& res, int num_sim, double lambda, double beta, boost::random::mt19937& rng) { - double mode = dgig_mode(lambda, beta); // argmax of g(x) - double mode_x = (1 + lambda + sqrt((1 + lambda) * (1 + lambda) + beta * beta)) / beta; // argmax of x g(x) - double bound_y = dgig_quasi(mode, lambda, beta) / 2; // To normalize g - double bound_x = exp(log(mode_x) + dgig_quasi(mode_x, lambda, beta) / 2 - bound_y); - bool rejected; - double draw_x, draw_y, cand; // bounded rectangle - for (int i = 0; i < num_sim; i++) { - rejected = true; - while (rejected) { - draw_x = unif_rand(0, bound_x, rng); - draw_y = unif_rand(0, 1, rng); - cand = draw_x / draw_y; - rejected = log(draw_y) > dgig_quasi(cand, lambda, beta) / 2 - bound_y; // Check if U <= g(y) / unif(y) - } - res[i] = cand; - } -} // Ratio-of-Uniforms with Mode Shift // @@ -397,36 +277,6 @@ inline void rgig_with_mode(Eigen::VectorXd& res, int num_sim, double lambda, dou res[i] = cand; } } -// overloading -inline void rgig_with_mode(Eigen::VectorXd& res, int num_sim, double lambda, double beta, boost::random::mt19937& rng) { - double mode = dgig_mode(lambda, beta); // argmax of g(x) - double quad_coef = -2 * (lambda + 1) / beta - mode; - double lin_coef = 2 * mode * (lambda - 1) / beta - 1; - double p = lin_coef - quad_coef * quad_coef / 3; - double q = 2 * quad_coef * quad_coef * quad_coef / 27 - quad_coef * lin_coef / 3 + mode; - double phi = acos(-q * sqrt(-27 / (p * p * p)) / 2); - double arg_x_neg = sqrt(-p * 4 / 3) * cos(phi / 3 + M_PI * 4 / 3) - quad_coef / 3; - double arg_x_pos = sqrt(-p * 4 / 3) * cos(phi / 3) - quad_coef / 3; - double bound_y = dgig_quasi(mode, lambda, beta) / 2; // use as normalize factor 
- double bound_x_neg = (arg_x_neg - mode) * exp(dgig_quasi(arg_x_neg, lambda, beta) / 2 - bound_y); - double bound_x_pos = (arg_x_pos - mode) * exp(dgig_quasi(arg_x_pos, lambda, beta) / 2 - bound_y); - bool rejected; - double draw_x, draw_y, cand; // bounded rectangle - for (int i = 0; i < num_sim; i++) { - rejected = true; - while (rejected) { - draw_x = unif_rand(bound_x_neg, bound_x_pos, rng); - draw_y = unif_rand(0, 1, rng); // U(0, 1) since g has been normalized - cand = draw_x / draw_y + mode; - if (cand > 0) { - rejected = log(draw_y) > dgig_quasi(cand, lambda, beta) / 2 - bound_y; // Check if U <= g(y) / unif(y) - } else { - rejected = true; // cand can be negative - } - } - res[i] = cand; - } -} // Generate Generalized Inverse Gaussian Distribution // @@ -439,7 +289,7 @@ inline void rgig_with_mode(Eigen::VectorXd& res, int num_sim, double lambda, dou // @param chi Third parameter of GIG inline Eigen::VectorXd sim_gig(int num_sim, double lambda, double psi, double chi) { // if (psi <= 0 || chi <= 0) { - // Rcpp::stop("Wrong 'psi' and 'chi' range."); + // STOP("Wrong 'psi' and 'chi' range."); // } Eigen::VectorXd res(num_sim); double abs_lam = abs(lambda); // If lambda < 0, use 1 / X as the result @@ -486,7 +336,7 @@ inline Eigen::VectorXd sim_gig(int num_sim, double lambda, double psi, double ch } else if (beta > 0) { rgig_nonconcave(res, num_sim, abs_lam, beta); // non-T_(-1/2)-concave part } else { - Rcpp::stop("Wrong parameter ranges for quasi GIG density: lambda = %g, psi = %g, chi = %g", lambda, psi, chi); + STOP("Wrong parameter ranges for quasi GIG density: lambda = %g, psi = %g, chi = %g", lambda, psi, chi); } if (lambda < 0) { res = res.cwiseInverse(); @@ -494,7 +344,168 @@ inline Eigen::VectorXd sim_gig(int num_sim, double lambda, double psi, double ch // return res / alpha; // alpha: reciprocal of scale parameter return res * sqrt(chi / psi); } -// overloading +#endif + +// Generate MN(M, U, V) +inline Eigen::MatrixXd sim_mn(const 
Eigen::MatrixXd& mat_mean, const Eigen::MatrixXd& mat_scale_u, const Eigen::MatrixXd& mat_scale_v, + bool prec, boost::random::mt19937& rng) { + int num_rows = mat_mean.rows(); + int num_cols = mat_mean.cols(); + Eigen::MatrixXd chol_scale_v = mat_scale_v.llt().matrixU(); // V = U_vTU_v + Eigen::MatrixXd mat_norm(num_rows, num_cols); // standard normal + for (int i = 0; i < num_rows; i++) { + for (int j = 0; j < num_cols; j++) { + mat_norm(i, j) = normal_rand(rng); + } + } + if (prec) { + return mat_mean + mat_scale_u.llt().matrixU().solve(mat_norm * chol_scale_v); // M + U_u^(-1) X U_v ~ MN(M, U_u^(-1) U_u^(-1)T = U, U_vT U_v = V) + } + Eigen::MatrixXd chol_scale_u = mat_scale_u.llt().matrixL(); // U = LLT + return mat_mean + chol_scale_u * mat_norm * chol_scale_v; // M + L X U_v ~ MN(M, LLT = U, U_vT U_v = V) +} + +// Generate Lower Triangular Matrix of IW +inline Eigen::MatrixXd sim_iw_tri(const Eigen::MatrixXd& mat_scale, double shape, boost::random::mt19937& rng) { + int dim = mat_scale.cols(); + if (shape <= dim - 1) { + STOP("Wrong 'shape'. 
shape > dim - 1 must be satisfied."); + } + if (mat_scale.rows() != mat_scale.cols()) { + STOP("Invalid 'mat_scale' dimension."); + } + if (dim != mat_scale.rows()) { + STOP("Invalid 'mat_scale' dimension."); + } + Eigen::MatrixXd mat_bartlett = Eigen::MatrixXd::Zero(dim, dim); // upper triangular bartlett decomposition + // generate in row direction + for (int i = 0; i < dim; i++) { + mat_bartlett(i, i) = sqrt(bvhar::chisq_rand(shape - (double)i, rng)); // diagonal: qii^2 ~ chi^2(nu - i + 1) + } + for (int i = 0; i < dim - 1; i ++) { + for (int j = i + 1; j < dim; j++) { + mat_bartlett(i, j) = normal_rand(rng); // upper triangular (j > i) ~ N(0, 1) + } + } + Eigen::MatrixXd chol_scale = mat_scale.llt().matrixL(); + return chol_scale * mat_bartlett.transpose().triangularView().solve(Eigen::MatrixXd::Identity(dim, dim)); // lower triangular +} + +// Generate MNIW(M, U, Psi, nu) +inline std::vector sim_mn_iw(const Eigen::MatrixXd& mat_mean, const Eigen::MatrixXd& mat_scale_u, + const Eigen::MatrixXd& mat_scale, double shape, bool prec, boost::random::mt19937& rng) { + Eigen::MatrixXd chol_res = sim_iw_tri(mat_scale, shape, rng); + Eigen::MatrixXd mat_scale_v = chol_res * chol_res.transpose(); + std::vector res(2); + res[0] = sim_mn(mat_mean, mat_scale_u, mat_scale_v, prec, rng); + res[1] = mat_scale_v; + return res; +} + +// AR-Mehod for non-concave part +inline void rgig_nonconcave(Eigen::VectorXd& res, int num_sim, double lambda, double beta, boost::random::mt19937& rng) { + double mode = dgig_mode(lambda, beta); // argmax of g(x) + double x0, xstar, k1, k2, k3, A1, A2, A3; + x0 = beta / (1 - lambda); // subdomain (0, x0) + xstar = std::max(x0, 2 / beta); + k1 = exp(dgig_quasi(mode, lambda, beta)); + A1 = k1 * x0; + if (x0 < 2 / beta) { // subdomain (x0, 2 / beta) + k2 = exp(-beta); + if (lambda == 0) { + A2 = k2 * log(2 / (beta * beta)); + } else { + A2 = k2 * (pow(2 / beta, lambda) - pow(x0, lambda)) / lambda; + } + } else { + k2 = 0; + A2 = 0; + } + k3 = 
pow(xstar, lambda - 1); // subdomain (xstar, inf) + A3 = 2 * k3 * exp(-xstar * beta / 2) / beta; + double A = A1 + A2 + A3; + bool rejected; + double draw_unif, draw_prop, cand, ar_const; + for (int i = 0; i < num_sim; i++) { + rejected = true; + while (rejected) { + draw_prop = unif_rand(0, A, rng); + if (draw_prop <= A1) { // subdomain (0, x0) + cand = x0 * draw_prop / A1; + ar_const = log(k1); + } else if (draw_prop <= A1 + A2) { // subdomain (x0, 2 / beta) + draw_prop -= A1; + if (lambda == 0) { + cand = beta * exp(draw_prop * exp(beta)); + } else { + cand = pow(pow(x0, lambda) + draw_prop * lambda / k2, 1 / lambda); + } + ar_const = log(k2) + (lambda - 1) * log(cand); + } else { // subdomain (xstar, inf) + draw_prop -= (A1 + A2); + cand = -2 * log(exp(-xstar * beta / 2) - draw_prop * beta / (2 * k3)) / beta; + ar_const = log(k3) - cand * beta / 2; + } + draw_unif = unif_rand(0, 1, rng); + rejected = log(draw_unif) + ar_const > dgig_quasi(cand, lambda, beta); + } + res[i] = cand; + } +} + +// Ratio-of-Uniforms without Mode Shift +inline void rgig_without_mode(Eigen::VectorXd& res, int num_sim, double lambda, double beta, boost::random::mt19937& rng) { + double mode = dgig_mode(lambda, beta); // argmax of g(x) + double mode_x = (1 + lambda + sqrt((1 + lambda) * (1 + lambda) + beta * beta)) / beta; // argmax of x g(x) + double bound_y = dgig_quasi(mode, lambda, beta) / 2; // To normalize g + double bound_x = exp(log(mode_x) + dgig_quasi(mode_x, lambda, beta) / 2 - bound_y); + bool rejected; + double draw_x, draw_y, cand; // bounded rectangle + for (int i = 0; i < num_sim; i++) { + rejected = true; + while (rejected) { + draw_x = unif_rand(0, bound_x, rng); + draw_y = unif_rand(0, 1, rng); + cand = draw_x / draw_y; + rejected = log(draw_y) > dgig_quasi(cand, lambda, beta) / 2 - bound_y; // Check if U <= g(y) / unif(y) + } + res[i] = cand; + } +} + + +// Ratio-of-Uniforms with Mode Shift +inline void rgig_with_mode(Eigen::VectorXd& res, int num_sim, double lambda, 
double beta, boost::random::mt19937& rng) { + double mode = dgig_mode(lambda, beta); // argmax of g(x) + double quad_coef = -2 * (lambda + 1) / beta - mode; + double lin_coef = 2 * mode * (lambda - 1) / beta - 1; + double p = lin_coef - quad_coef * quad_coef / 3; + double q = 2 * quad_coef * quad_coef * quad_coef / 27 - quad_coef * lin_coef / 3 + mode; + double phi = acos(-q * sqrt(-27 / (p * p * p)) / 2); + double arg_x_neg = sqrt(-p * 4 / 3) * cos(phi / 3 + M_PI * 4 / 3) - quad_coef / 3; + double arg_x_pos = sqrt(-p * 4 / 3) * cos(phi / 3) - quad_coef / 3; + double bound_y = dgig_quasi(mode, lambda, beta) / 2; // use as normalize factor + double bound_x_neg = (arg_x_neg - mode) * exp(dgig_quasi(arg_x_neg, lambda, beta) / 2 - bound_y); + double bound_x_pos = (arg_x_pos - mode) * exp(dgig_quasi(arg_x_pos, lambda, beta) / 2 - bound_y); + bool rejected; + double draw_x, draw_y, cand; // bounded rectangle + for (int i = 0; i < num_sim; i++) { + rejected = true; + while (rejected) { + draw_x = unif_rand(bound_x_neg, bound_x_pos, rng); + draw_y = unif_rand(0, 1, rng); // U(0, 1) since g has been normalized + cand = draw_x / draw_y + mode; + if (cand > 0) { + rejected = log(draw_y) > dgig_quasi(cand, lambda, beta) / 2 - bound_y; // Check if U <= g(y) / unif(y) + } else { + rejected = true; // cand can be negative + } + } + res[i] = cand; + } +} + +// Generate Generalized Inverse Gaussian Distribution inline Eigen::VectorXd sim_gig(int num_sim, double lambda, double psi, double chi, boost::random::mt19937& rng) { Eigen::VectorXd res(num_sim); double abs_lam = abs(lambda); // If lambda < 0, use 1 / X as the result @@ -541,7 +552,7 @@ inline Eigen::VectorXd sim_gig(int num_sim, double lambda, double psi, double ch } else if (beta > 0) { rgig_nonconcave(res, num_sim, abs_lam, beta, rng); // non-T_(-1/2)-concave part } else { - Rcpp::stop("Wrong parameter ranges for quasi GIG density: lambda = %g, psi = %g, chi = %g", lambda, psi, chi); + STOP("Wrong parameter ranges for 
quasi GIG density: lambda = %g, psi = %g, chi = %g", lambda, psi, chi); } if (lambda < 0) { res = res.cwiseInverse(); diff --git a/inst/include/bvharstructural.h b/inst/include/bvharstructural.h index f33351f2..ecba28f0 100644 --- a/inst/include/bvharstructural.h +++ b/inst/include/bvharstructural.h @@ -1,7 +1,12 @@ #ifndef BVHARSTRUCTURAL_H #define BVHARSTRUCTURAL_H -#include +#ifdef USE_RCPP + #include +#else + #include + #include +#endif namespace bvhar { diff --git a/inst/include/commondefs.h b/inst/include/commondefs.h new file mode 100644 index 00000000..ffa8b979 --- /dev/null +++ b/inst/include/commondefs.h @@ -0,0 +1,62 @@ +#ifndef COMMONDEFS_H +#define COMMONDEFS_H + +#ifdef USE_RCPP + #include + #define STOP(...) Rcpp::stop(__VA_ARGS__) + + #define LIST Rcpp::List + #define CAST Rcpp::as + #define CAST_DOUBLE(value) value + #define CAST_INT(value) value + #define CONTAINS(container, key) container.containsElementNamed(key) + #define CREATE_LIST(...) Rcpp::List::create(__VA_ARGS__) + #define NAMED Rcpp::Named + #define ACCESS_LIST(iterator, list) iterator + #define IS_MATRIX(element) Rcpp::is(element) + #define CAST_MATRIX(element) element +#else + #include + #include + #include + #include + #include + #include + #include + #include + + #define Rf_gammafn(x) std::tgamma(x) + #define Rf_lgammafn(x) std::lgamma(x) + #define Rf_dgamma(x, shp, scl, lg) (lg ? log((shp - 1) * log(x) - x / scl - std::lgamma(shp) - shp * log(scl)) : exp((shp - 1) * log(x) - x / scl - std::lgamma(shp) - shp * log(scl))) + + namespace py = pybind11; + + void stop_fmt(const std::string& msg) { + throw py::value_error(msg); + } + + template + void stop_fmt(const std::string& msg, Args&&... args) { + throw py::value_error(py::str(msg).format(std::forward(args)...)); + } + + #define STOP(...) 
stop_fmt(__VA_ARGS__) + + #define LIST py::dict + #define CAST py::cast + #define CAST_DOUBLE(value) py::cast(value) + #define CAST_INT(value) py::cast(value) + #define CONTAINS(container, key) container.contains(key) + #define CREATE_LIST(...) py::dict(__VA_ARGS__) + #define NAMED py::arg + #define ACCESS_LIST(iterator, list) list[iterator.first] + #define IS_MATRIX(element) py::detail::type_caster().load(element, false) + #define CAST_MATRIX(element) py::cast(element) + + #ifndef M_PI + // Some platform does not have M_PI defined - to the same value as in Rmath.h + #define M_PI 3.141592653589793238462643383280 + #endif +#endif + +#endif // COMMONDEFS_H \ No newline at end of file diff --git a/inst/include/mcmcreg.h b/inst/include/mcmcreg.h index a33e62ee..8b164f3d 100644 --- a/inst/include/mcmcreg.h +++ b/inst/include/mcmcreg.h @@ -38,27 +38,27 @@ struct MinnParams : public RegParams { Eigen::MatrixXd _prior_prec; MinnParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& reg_spec, Rcpp::List& priors, Rcpp::List& intercept, + LIST& reg_spec, LIST& priors, LIST& intercept, bool include_mean ) : RegParams(num_iter, x, y, reg_spec, intercept, include_mean), _prec_diag(Eigen::MatrixXd::Zero(y.cols(), y.cols())) { - int lag = priors["p"]; // append to bayes_spec, p = 3 in VHAR - Eigen::VectorXd _sigma = Rcpp::as(priors["sigma"]); - double _lambda = priors["lambda"]; - double _eps = priors["eps"]; + int lag = CAST_INT(priors["p"]); // append to bayes_spec, p = 3 in VHAR + Eigen::VectorXd _sigma = CAST(priors["sigma"]); + double _lambda = CAST_DOUBLE(priors["lambda"]); + double _eps = CAST_DOUBLE(priors["eps"]); int dim = _sigma.size(); Eigen::VectorXd _daily(dim); Eigen::VectorXd _weekly(dim); Eigen::VectorXd _monthly(dim); - if (priors.containsElementNamed("delta")) { - _daily = Rcpp::as(priors["delta"]); + if (CONTAINS(priors, "delta")) { + _daily = CAST(priors["delta"]); _weekly.setZero(); _monthly.setZero(); } else { - _daily = 
Rcpp::as(priors["daily"]); - _weekly = Rcpp::as(priors["weekly"]); - _monthly = Rcpp::as(priors["monthly"]); + _daily = CAST(priors["daily"]); + _weekly = CAST(priors["weekly"]); + _monthly = CAST(priors["monthly"]); } Eigen::MatrixXd dummy_response = build_ydummy(lag, _sigma, _lambda, _daily, _weekly, _monthly, false); Eigen::MatrixXd dummy_design = build_xdummy( @@ -85,29 +85,29 @@ struct HierminnParams : public RegParams { HierminnParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& reg_spec, + LIST& reg_spec, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& priors, Rcpp::List& intercept, + LIST& priors, LIST& intercept, bool include_mean ) : RegParams(num_iter, x, y, reg_spec, intercept, include_mean), - shape(priors["shape"]), rate(priors["rate"]), + shape(CAST_DOUBLE(priors["shape"])), rate(CAST_DOUBLE(priors["rate"])), _prec_diag(Eigen::MatrixXd::Zero(y.cols(), y.cols())) { - int lag = priors["p"]; // append to bayes_spec, p = 3 in VHAR - Eigen::VectorXd _sigma = Rcpp::as(priors["sigma"]); - double _eps = priors["eps"]; + int lag = CAST_INT(priors["p"]); // append to bayes_spec, p = 3 in VHAR + Eigen::VectorXd _sigma = CAST(priors["sigma"]); + double _eps = CAST_DOUBLE(priors["eps"]); int dim = _sigma.size(); Eigen::VectorXd _daily(dim); Eigen::VectorXd _weekly(dim); Eigen::VectorXd _monthly(dim); - if (priors.containsElementNamed("delta")) { - _daily = Rcpp::as(priors["delta"]); + if (CONTAINS(priors, "delta")) { + _daily = CAST(priors["delta"]); _weekly.setZero(); _monthly.setZero(); } else { - _daily = Rcpp::as(priors["daily"]); - _weekly = Rcpp::as(priors["weekly"]); - _monthly = Rcpp::as(priors["monthly"]); + _daily = CAST(priors["daily"]); + _weekly = CAST(priors["weekly"]); + _monthly = CAST(priors["monthly"]); } Eigen::MatrixXd dummy_response = build_ydummy(lag, _sigma, 1, _daily, _weekly, _monthly, false); Eigen::MatrixXd dummy_design = build_xdummy( @@ 
-152,9 +152,9 @@ struct SsvsParams : public RegParams { SsvsParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& reg_spec, + LIST& reg_spec, const Eigen::VectorXi& grp_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& ssvs_spec, Rcpp::List& intercept, + LIST& ssvs_spec, LIST& intercept, bool include_mean ) : RegParams(num_iter, x, y, reg_spec, intercept, include_mean), @@ -165,11 +165,11 @@ struct SsvsParams : public RegParams { // _contem_spike(Rcpp::as(ssvs_spec["chol_spike"])), // _contem_slab(Rcpp::as(ssvs_spec["chol_slab"])), // _contem_weight(Rcpp::as(ssvs_spec["chol_mixture"])), - _coef_s1(Rcpp::as(ssvs_spec["coef_s1"])), _coef_s2(Rcpp::as(ssvs_spec["coef_s2"])), - _contem_s1(ssvs_spec["chol_s1"]), _contem_s2(ssvs_spec["chol_s2"]), - _coef_spike_scl(ssvs_spec["coef_spike_scl"]), _contem_spike_scl(ssvs_spec["chol_spike_scl"]), - _coef_slab_shape(ssvs_spec["coef_slab_shape"]), _coef_slab_scl(ssvs_spec["coef_slab_scl"]), - _contem_slab_shape(ssvs_spec["chol_slab_shape"]), _contem_slab_scl(ssvs_spec["chol_slab_scl"]) {} + _coef_s1(CAST(ssvs_spec["coef_s1"])), _coef_s2(CAST(ssvs_spec["coef_s2"])), + _contem_s1(CAST_DOUBLE(ssvs_spec["chol_s1"])), _contem_s2(CAST_DOUBLE(ssvs_spec["chol_s2"])), + _coef_spike_scl(CAST_DOUBLE(ssvs_spec["coef_spike_scl"])), _contem_spike_scl(CAST_DOUBLE(ssvs_spec["chol_spike_scl"])), + _coef_slab_shape(CAST_DOUBLE(ssvs_spec["coef_slab_shape"])), _coef_slab_scl(CAST_DOUBLE(ssvs_spec["coef_slab_scl"])), + _contem_slab_shape(CAST_DOUBLE(ssvs_spec["chol_slab_shape"])), _contem_slab_scl(CAST_DOUBLE(ssvs_spec["chol_slab_scl"])) {} }; struct HorseshoeParams : public RegParams { @@ -178,9 +178,9 @@ struct HorseshoeParams : public RegParams { HorseshoeParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& reg_spec, + LIST& reg_spec, const Eigen::VectorXi& grp_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& intercept, bool include_mean + LIST& intercept, bool include_mean ) : 
RegParams(num_iter, x, y, reg_spec, intercept, include_mean), _grp_id(grp_id), _grp_mat(grp_mat) {} }; @@ -200,18 +200,18 @@ struct NgParams : public RegParams { NgParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& reg_spec, + LIST& reg_spec, const Eigen::VectorXi& grp_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& ng_spec, Rcpp::List& intercept, + LIST& ng_spec, LIST& intercept, bool include_mean ) : RegParams(num_iter, x, y, reg_spec, intercept, include_mean), _grp_id(grp_id), _grp_mat(grp_mat), // _local_shape(ng_spec["local_shape"]), // _contem_shape(ng_spec["contem_shape"]), - _mh_sd(ng_spec["shape_sd"]), - _group_shape(ng_spec["group_shape"]), _group_scl(ng_spec["group_scale"]), - _global_shape(ng_spec["global_shape"]), _global_scl(ng_spec["global_scale"]), - _contem_global_shape(ng_spec["contem_global_shape"]), _contem_global_scl(ng_spec["contem_global_scale"]) {} + _mh_sd(CAST_DOUBLE(ng_spec["shape_sd"])), + _group_shape(CAST_DOUBLE(ng_spec["group_shape"])), _group_scl(CAST_DOUBLE(ng_spec["group_scale"])), + _global_shape(CAST_DOUBLE(ng_spec["global_shape"])), _global_scl(CAST_DOUBLE(ng_spec["global_scale"])), + _contem_global_shape(CAST_DOUBLE(ng_spec["contem_global_shape"])), _contem_global_scl(CAST_DOUBLE(ng_spec["contem_global_scale"])) {} }; struct DlParams : public RegParams { @@ -225,23 +225,23 @@ struct DlParams : public RegParams { DlParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& reg_spec, + LIST& reg_spec, const Eigen::VectorXi& grp_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& dl_spec, Rcpp::List& intercept, + LIST& dl_spec, LIST& intercept, bool include_mean ) : RegParams(num_iter, x, y, reg_spec, intercept, include_mean), _grp_id(grp_id), _grp_mat(grp_mat), // _dl_concen(dl_spec["dirichlet"]), _contem_dl_concen(dl_spec["contem_dirichlet"]), - _grid_size(dl_spec["grid_size"]), _shape(dl_spec["shape"]), _rate(dl_spec["rate"]) {} + 
_grid_size(CAST_INT(dl_spec["grid_size"])), _shape(CAST_DOUBLE(dl_spec["shape"])), _rate(CAST_DOUBLE(dl_spec["rate"])) {} }; struct LdltInits : public RegInits { Eigen::VectorXd _diag; - - LdltInits(Rcpp::List& init) + + LdltInits(LIST& init) : RegInits(init), - _diag(Rcpp::as(init["init_diag"])) {} + _diag(CAST(init["init_diag"])) {} }; struct HierminnInits : public LdltInits { @@ -249,9 +249,9 @@ struct HierminnInits : public LdltInits { double _cross_lambda; double _contem_lambda; - HierminnInits(Rcpp::List& init) + HierminnInits(LIST& init) : LdltInits(init), - _own_lambda(init["own_lambda"]), _cross_lambda(init["cross_lambda"]), _contem_lambda(init["contem_lambda"]) {} + _own_lambda(CAST_DOUBLE(init["own_lambda"])), _cross_lambda(CAST_DOUBLE(init["cross_lambda"])), _contem_lambda(CAST_DOUBLE(init["contem_lambda"])) {} }; struct SsvsInits : public LdltInits { @@ -261,13 +261,13 @@ struct SsvsInits : public LdltInits { Eigen::VectorXd _coef_slab; Eigen::VectorXd _contem_slab; - SsvsInits(Rcpp::List& init) + SsvsInits(LIST& init) : LdltInits(init), - _coef_dummy(Rcpp::as(init["init_coef_dummy"])), - _coef_weight(Rcpp::as(init["coef_mixture"])), - _contem_weight(Rcpp::as(init["chol_mixture"])), - _coef_slab(Rcpp::as(init["coef_slab"])), - _contem_slab(Rcpp::as(init["contem_slab"])) {} + _coef_dummy(CAST(init["init_coef_dummy"])), + _coef_weight(CAST(init["coef_mixture"])), + _contem_weight(CAST(init["chol_mixture"])), + _coef_slab(CAST(init["coef_slab"])), + _contem_slab(CAST(init["contem_slab"])) {} }; struct GlInits : public LdltInits { @@ -276,30 +276,30 @@ struct GlInits : public LdltInits { Eigen::VectorXd _init_contem_local; Eigen::VectorXd _init_conetm_global; - GlInits(Rcpp::List& init) + GlInits(LIST& init) : LdltInits(init), - _init_local(Rcpp::as(init["local_sparsity"])), - _init_global(init["global_sparsity"]), - _init_contem_local(Rcpp::as(init["contem_local_sparsity"])), - _init_conetm_global(Rcpp::as(init["contem_global_sparsity"])) {} + 
_init_local(CAST(init["local_sparsity"])), + _init_global(CAST_DOUBLE(init["global_sparsity"])), + _init_contem_local(CAST(init["contem_local_sparsity"])), + _init_conetm_global(CAST(init["contem_global_sparsity"])) {} }; struct HsInits : public GlInits { Eigen::VectorXd _init_group; - HsInits(Rcpp::List& init) + HsInits(LIST& init) : GlInits(init), - _init_group(Rcpp::as(init["group_sparsity"])) {} + _init_group(CAST(init["group_sparsity"])) {} }; struct NgInits : public HsInits { Eigen::VectorXd _init_local_shape; double _init_contem_shape; - NgInits(Rcpp::List& init) + NgInits(LIST& init) : HsInits(init), - _init_local_shape(Rcpp::as(init["local_shape"])), - _init_contem_shape(init["contem_shape"]) {} + _init_local_shape(CAST(init["local_shape"])), + _init_contem_shape(CAST_DOUBLE(init["contem_shape"])) {} }; struct LdltRecords : public RegRecords { @@ -371,7 +371,12 @@ class McmcReg { } virtual ~McmcReg() = default; virtual void doPosteriorDraws() = 0; - virtual Rcpp::List returnRecords(int num_burn, int thin) const = 0; +// #ifdef USE_RCPP +// virtual Rcpp::List returnRecords(int num_burn, int thin) const = 0; +// #else +// virtual py::dict returnRecords(int num_burn, int thin) const = 0; +// #endif + virtual LIST returnRecords(int num_burn, int thin) const = 0; LdltRecords returnLdltRecords(int num_burn, int thin, bool sparse = false) const { if (sparse) { Eigen::MatrixXd coef_record(num_iter + 1, num_coef); @@ -510,19 +515,40 @@ class MinnReg : public McmcReg { updateDiag(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), - Rcpp::Named("a_record") = reg_record.contem_coef_record, - Rcpp::Named("d_record") = reg_record.fac_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// #ifdef USE_RCPP +// Rcpp::List 
returnRecords(int num_burn, int thin) const override { +// Rcpp::List res = Rcpp::List::create( +// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), +// Rcpp::Named("a_record") = reg_record.contem_coef_record, +// Rcpp::Named("d_record") = reg_record.fac_record, +// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, +// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// ); +// if (include_mean) { +// res["c_record"] = reg_record.coef_record.rightCols(dim); +// } +// for (auto& record : res) { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } +// return res; +// } +// #else +// py::dict returnRecords(int num_burn, int thin) const override; +// #endif + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), + NAMED("a_record") = reg_record.contem_coef_record, + NAMED("d_record") = reg_record.fac_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = reg_record.coef_record.rightCols(dim); + // res["c_record"] = reg_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(reg_record.coef_record.rightCols(dim)); } for (auto& record : res) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } return res; } @@ -585,19 +611,40 @@ class HierminnReg : public McmcReg { updateDiag(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), - Rcpp::Named("a_record") = reg_record.contem_coef_record, - Rcpp::Named("d_record") = reg_record.fac_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, 
- Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// #ifdef USE_RCPP +// Rcpp::List returnRecords(int num_burn, int thin) const override { +// Rcpp::List res = Rcpp::List::create( +// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), +// Rcpp::Named("a_record") = reg_record.contem_coef_record, +// Rcpp::Named("d_record") = reg_record.fac_record, +// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, +// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// ); +// if (include_mean) { +// res["c_record"] = reg_record.coef_record.rightCols(dim); +// } +// for (auto& record : res) { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } +// return res; +// } +// #else +// py::dict returnRecords(int num_burn, int thin) const override; +// #endif + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), + NAMED("a_record") = reg_record.contem_coef_record, + NAMED("d_record") = reg_record.fac_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = reg_record.coef_record.rightCols(dim); + // res["c_record"] = reg_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(reg_record.coef_record.rightCols(dim)); } for (auto& record : res) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } return res; } @@ -699,20 +746,42 @@ class SsvsReg : public McmcReg { updateDiag(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), - Rcpp::Named("a_record") = reg_record.contem_coef_record, - 
Rcpp::Named("d_record") = reg_record.fac_record, - Rcpp::Named("gamma_record") = ssvs_record.coef_dummy_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// #ifdef USE_RCPP +// Rcpp::List returnRecords(int num_burn, int thin) const override { +// Rcpp::List res = Rcpp::List::create( +// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), +// Rcpp::Named("a_record") = reg_record.contem_coef_record, +// Rcpp::Named("d_record") = reg_record.fac_record, +// Rcpp::Named("gamma_record") = ssvs_record.coef_dummy_record, +// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, +// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// ); +// if (include_mean) { +// res["c_record"] = reg_record.coef_record.rightCols(dim); +// } +// for (auto& record : res) { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } +// return res; +// } +// #else +// py::dict returnRecords(int num_burn, int thin) const override; +// #endif + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), + NAMED("a_record") = reg_record.contem_coef_record, + NAMED("d_record") = reg_record.fac_record, + NAMED("gamma_record") = ssvs_record.coef_dummy_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = reg_record.coef_record.rightCols(dim); + // res["c_record"] = reg_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(reg_record.coef_record.rightCols(dim)); } for (auto& record : res) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } return res; } @@ -817,26 +886,55 @@ class HorseshoeReg : 
public McmcReg { updateDiag(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), - Rcpp::Named("a_record") = reg_record.contem_coef_record, - Rcpp::Named("d_record") = reg_record.fac_record, - Rcpp::Named("lambda_record") = hs_record.local_record, - Rcpp::Named("eta_record") = hs_record.group_record, - Rcpp::Named("tau_record") = hs_record.global_record, - Rcpp::Named("kappa_record") = hs_record.shrink_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// #ifdef USE_RCPP +// Rcpp::List returnRecords(int num_burn, int thin) const override { +// Rcpp::List res = Rcpp::List::create( +// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), +// Rcpp::Named("a_record") = reg_record.contem_coef_record, +// Rcpp::Named("d_record") = reg_record.fac_record, +// Rcpp::Named("lambda_record") = hs_record.local_record, +// Rcpp::Named("eta_record") = hs_record.group_record, +// Rcpp::Named("tau_record") = hs_record.global_record, +// Rcpp::Named("kappa_record") = hs_record.shrink_record, +// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, +// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// ); +// if (include_mean) { +// res["c_record"] = reg_record.coef_record.rightCols(dim); +// } +// for (auto& record : res) { +// if (Rcpp::is(record)) { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } else { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } +// } +// return res; +// } +// #else +// py::dict returnRecords(int num_burn, int thin) const override; +// #endif + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), + 
NAMED("a_record") = reg_record.contem_coef_record, + NAMED("d_record") = reg_record.fac_record, + NAMED("lambda_record") = hs_record.local_record, + NAMED("eta_record") = hs_record.group_record, + NAMED("tau_record") = hs_record.global_record, + NAMED("kappa_record") = hs_record.shrink_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = reg_record.coef_record.rightCols(dim); + // res["c_record"] = reg_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(reg_record.coef_record.rightCols(dim)); } for (auto& record : res) { - if (Rcpp::is(record)) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + if (IS_MATRIX(ACCESS_LIST(record, res))) { + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } else { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } } return res; @@ -949,25 +1047,53 @@ class NgReg : public McmcReg { updateDiag(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), - Rcpp::Named("a_record") = reg_record.contem_coef_record, - Rcpp::Named("d_record") = reg_record.fac_record, - Rcpp::Named("lambda_record") = ng_record.local_record, - Rcpp::Named("eta_record") = ng_record.group_record, - Rcpp::Named("tau_record") = ng_record.global_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// #ifdef USE_RCPP +// Rcpp::List returnRecords(int num_burn, int thin) const override { +// Rcpp::List res = Rcpp::List::create( +// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), +// 
Rcpp::Named("a_record") = reg_record.contem_coef_record, +// Rcpp::Named("d_record") = reg_record.fac_record, +// Rcpp::Named("lambda_record") = ng_record.local_record, +// Rcpp::Named("eta_record") = ng_record.group_record, +// Rcpp::Named("tau_record") = ng_record.global_record, +// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, +// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// ); +// if (include_mean) { +// res["c_record"] = reg_record.coef_record.rightCols(dim); +// } +// for (auto& record : res) { +// if (Rcpp::is(record)) { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } else { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } +// } +// return res; +// } +// #else +// py::dict returnRecords(int num_burn, int thin) const override; +// #endif + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), + NAMED("a_record") = reg_record.contem_coef_record, + NAMED("d_record") = reg_record.fac_record, + NAMED("lambda_record") = ng_record.local_record, + NAMED("eta_record") = ng_record.group_record, + NAMED("tau_record") = ng_record.global_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = reg_record.coef_record.rightCols(dim); + // res["c_record"] = reg_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(reg_record.coef_record.rightCols(dim)); } for (auto& record : res) { - if (Rcpp::is(record)) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + if (IS_MATRIX(ACCESS_LIST(record, res))) { + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } else { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = 
thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } } return res; @@ -1081,24 +1207,51 @@ class DlReg : public McmcReg { updateDiag(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), - Rcpp::Named("a_record") = reg_record.contem_coef_record, - Rcpp::Named("d_record") = reg_record.fac_record, - Rcpp::Named("lambda_record") = dl_record.local_record, - Rcpp::Named("tau_record") = dl_record.global_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// #ifdef USE_RCPP +// Rcpp::List returnRecords(int num_burn, int thin) const override { +// Rcpp::List res = Rcpp::List::create( +// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), +// Rcpp::Named("a_record") = reg_record.contem_coef_record, +// Rcpp::Named("d_record") = reg_record.fac_record, +// Rcpp::Named("lambda_record") = dl_record.local_record, +// Rcpp::Named("tau_record") = dl_record.global_record, +// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, +// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record +// ); +// if (include_mean) { +// res["c_record"] = reg_record.coef_record.rightCols(dim); +// } +// for (auto& record : res) { +// if (Rcpp::is(record)) { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } else { +// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); +// } +// } +// return res; +// } +// #else +// py::dict returnRecords(int num_burn, int thin) const override; +// #endif + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), + NAMED("a_record") = reg_record.contem_coef_record, + NAMED("d_record") = reg_record.fac_record, + 
NAMED("lambda_record") = dl_record.local_record, + NAMED("tau_record") = dl_record.global_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = reg_record.coef_record.rightCols(dim); + // res["c_record"] = reg_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(reg_record.coef_record.rightCols(dim)); } for (auto& record : res) { - if (Rcpp::is(record)) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + if (IS_MATRIX(ACCESS_LIST(record, res))) { + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } else { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } } return res; diff --git a/inst/include/mcmcsv.h b/inst/include/mcmcsv.h index 031949e3..e78a0201 100644 --- a/inst/include/mcmcsv.h +++ b/inst/include/mcmcsv.h @@ -39,12 +39,12 @@ struct SvParams : public RegParams { SvParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& spec, Rcpp::List& intercept, + LIST& spec, LIST& intercept, bool include_mean ) : RegParams(num_iter, x, y, spec, intercept, include_mean), - _init_mean(Rcpp::as(spec["initial_mean"])), - _init_prec(Rcpp::as(spec["initial_prec"])) {} + _init_mean(CAST(spec["initial_mean"])), + _init_prec(CAST(spec["initial_prec"])) {} }; struct MinnSvParams : public SvParams { @@ -54,27 +54,27 @@ struct MinnSvParams : public SvParams { MinnSvParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& sv_spec, Rcpp::List& priors, Rcpp::List& intercept, + LIST& sv_spec, LIST& priors, LIST& intercept, bool include_mean ) : SvParams(num_iter, x, y, sv_spec, intercept, include_mean), _prec_diag(Eigen::MatrixXd::Zero(y.cols(), y.cols())) { - int lag = priors["p"]; // append to bayes_spec, p = 
3 in VHAR - Eigen::VectorXd _sigma = Rcpp::as(priors["sigma"]); - double _lambda = priors["lambda"]; - double _eps = priors["eps"]; + int lag = CAST_INT(priors["p"]); // append to bayes_spec, p = 3 in VHAR + Eigen::VectorXd _sigma = CAST(priors["sigma"]); + double _lambda = CAST_DOUBLE(priors["lambda"]); + double _eps = CAST_DOUBLE(priors["eps"]); int dim = _sigma.size(); Eigen::VectorXd _daily(dim); Eigen::VectorXd _weekly(dim); Eigen::VectorXd _monthly(dim); - if (priors.containsElementNamed("delta")) { - _daily = Rcpp::as(priors["delta"]); + if (CONTAINS(priors, "delta")) { + _daily = CAST(priors["delta"]); _weekly.setZero(); _monthly.setZero(); } else { - _daily = Rcpp::as(priors["daily"]); - _weekly = Rcpp::as(priors["weekly"]); - _monthly = Rcpp::as(priors["monthly"]); + _daily = CAST(priors["daily"]); + _weekly = CAST(priors["weekly"]); + _monthly = CAST(priors["monthly"]); } Eigen::MatrixXd dummy_response = build_ydummy(lag, _sigma, _lambda, _daily, _weekly, _monthly, false); Eigen::MatrixXd dummy_design = build_xdummy( @@ -102,29 +102,29 @@ struct HierminnSvParams : public SvParams { HierminnSvParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& sv_spec, + LIST& sv_spec, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& priors, Rcpp::List& intercept, + LIST& priors, LIST& intercept, bool include_mean ) : SvParams(num_iter, x, y, sv_spec, intercept, include_mean), - shape(priors["shape"]), rate(priors["rate"]), + shape(CAST_DOUBLE(priors["shape"])), rate(CAST_DOUBLE(priors["rate"])), _prec_diag(Eigen::MatrixXd::Zero(y.cols(), y.cols())) { - int lag = priors["p"]; // append to bayes_spec, p = 3 in VHAR - Eigen::VectorXd _sigma = Rcpp::as(priors["sigma"]); - double _eps = priors["eps"]; + int lag = CAST_INT(priors["p"]); // append to bayes_spec, p = 3 in VHAR + Eigen::VectorXd _sigma = CAST(priors["sigma"]); + double _eps = CAST_DOUBLE(priors["eps"]); int dim = 
_sigma.size(); Eigen::VectorXd _daily(dim); Eigen::VectorXd _weekly(dim); Eigen::VectorXd _monthly(dim); - if (priors.containsElementNamed("delta")) { - _daily = Rcpp::as(priors["delta"]); + if (CONTAINS(priors, "delta")) { + _daily = CAST(priors["delta"]); _weekly.setZero(); _monthly.setZero(); } else { - _daily = Rcpp::as(priors["daily"]); - _weekly = Rcpp::as(priors["weekly"]); - _monthly = Rcpp::as(priors["monthly"]); + _daily = CAST(priors["daily"]); + _weekly = CAST(priors["weekly"]); + _monthly = CAST(priors["monthly"]); } Eigen::MatrixXd dummy_response = build_ydummy(lag, _sigma, 1, _daily, _weekly, _monthly, false); Eigen::MatrixXd dummy_design = build_xdummy( @@ -170,9 +170,9 @@ struct SsvsSvParams : public SvParams { SsvsSvParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& sv_spec, + LIST& sv_spec, const Eigen::VectorXi& grp_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& ssvs_spec, Rcpp::List& intercept, + LIST& ssvs_spec, LIST& intercept, bool include_mean ) : SvParams(num_iter, x, y, sv_spec, intercept, include_mean), @@ -183,11 +183,11 @@ struct SsvsSvParams : public SvParams { // _contem_spike(Rcpp::as(ssvs_spec["chol_spike"])), // _contem_slab(Rcpp::as(ssvs_spec["chol_slab"])), // _contem_weight(Rcpp::as(ssvs_spec["chol_mixture"])), - _coef_s1(Rcpp::as(ssvs_spec["coef_s1"])), _coef_s2(Rcpp::as(ssvs_spec["coef_s2"])), - _contem_s1(ssvs_spec["chol_s1"]), _contem_s2(ssvs_spec["chol_s2"]), - _coef_spike_scl(ssvs_spec["coef_spike_scl"]), _contem_spike_scl(ssvs_spec["chol_spike_scl"]), - _coef_slab_shape(ssvs_spec["coef_slab_shape"]), _coef_slab_scl(ssvs_spec["coef_slab_scl"]), - _contem_slab_shape(ssvs_spec["chol_slab_shape"]), _contem_slab_scl(ssvs_spec["chol_slab_scl"]) {} + _coef_s1(CAST(ssvs_spec["coef_s1"])), _coef_s2(CAST(ssvs_spec["coef_s2"])), + _contem_s1(CAST_DOUBLE(ssvs_spec["chol_s1"])), _contem_s2(CAST_DOUBLE(ssvs_spec["chol_s2"])), + _coef_spike_scl(CAST_DOUBLE(ssvs_spec["coef_spike_scl"])), 
_contem_spike_scl(CAST_DOUBLE(ssvs_spec["chol_spike_scl"])), + _coef_slab_shape(CAST_DOUBLE(ssvs_spec["coef_slab_shape"])), _coef_slab_scl(CAST_DOUBLE(ssvs_spec["coef_slab_scl"])), + _contem_slab_shape(CAST_DOUBLE(ssvs_spec["chol_slab_shape"])), _contem_slab_scl(CAST_DOUBLE(ssvs_spec["chol_slab_scl"])) {} }; struct HsSvParams : public SvParams { @@ -196,9 +196,9 @@ struct HsSvParams : public SvParams { HsSvParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& sv_spec, + LIST& sv_spec, const Eigen::VectorXi& grp_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& intercept, bool include_mean + LIST& intercept, bool include_mean ) : SvParams(num_iter, x, y, sv_spec, intercept, include_mean), _grp_id(grp_id), _grp_mat(grp_mat) {} }; @@ -218,18 +218,18 @@ struct NgSvParams : public SvParams { NgSvParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& sv_spec, + LIST& sv_spec, const Eigen::VectorXi& grp_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& ng_spec, Rcpp::List& intercept, + LIST& ng_spec, LIST& intercept, bool include_mean ) : SvParams(num_iter, x, y, sv_spec, intercept, include_mean), _grp_id(grp_id), _grp_mat(grp_mat), // _local_shape(ng_spec["local_shape"]), // _contem_shape(ng_spec["contem_shape"]), - _mh_sd(ng_spec["shape_sd"]), - _group_shape(ng_spec["group_shape"]), _group_scl(ng_spec["group_scale"]), - _global_shape(ng_spec["global_shape"]), _global_scl(ng_spec["global_scale"]), - _contem_global_shape(ng_spec["contem_global_shape"]), _contem_global_scl(ng_spec["contem_global_scale"]) {} + _mh_sd(CAST_DOUBLE(ng_spec["shape_sd"])), + _group_shape(CAST_DOUBLE(ng_spec["group_shape"])), _group_scl(CAST_DOUBLE(ng_spec["group_scale"])), + _global_shape(CAST_DOUBLE(ng_spec["global_shape"])), _global_scl(CAST_DOUBLE(ng_spec["global_scale"])), + _contem_global_shape(CAST_DOUBLE(ng_spec["contem_global_shape"])), _contem_global_scl(CAST_DOUBLE(ng_spec["contem_global_scale"])) {} }; struct 
DlSvParams : public SvParams { @@ -243,15 +243,15 @@ struct DlSvParams : public SvParams { DlSvParams( int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, - Rcpp::List& sv_spec, + LIST& sv_spec, const Eigen::VectorXi& grp_id, const Eigen::MatrixXi& grp_mat, - Rcpp::List& dl_spec, Rcpp::List& intercept, + LIST& dl_spec, LIST& intercept, bool include_mean ) : SvParams(num_iter, x, y, sv_spec, intercept, include_mean), _grp_id(grp_id), _grp_mat(grp_mat), // _dl_concen(dl_spec["dirichlet"]), _contem_dl_concen(dl_spec["contem_dirichlet"]), - _grid_size(dl_spec["grid_size"]), _shape(dl_spec["shape"]), _rate(dl_spec["rate"]) {} + _grid_size(CAST_INT(dl_spec["grid_size"])), _shape(CAST_DOUBLE(dl_spec["shape"])), _rate(CAST_DOUBLE(dl_spec["rate"])) {} }; struct SvInits : public RegInits { @@ -267,16 +267,16 @@ struct SvInits : public RegInits { _lvol = _lvol_init.transpose().replicate(num_design, 1); _lvol_sig = .1 * Eigen::VectorXd::Ones(dim); } - SvInits(Rcpp::List& init) + SvInits(LIST& init) : RegInits(init), - _lvol_init(Rcpp::as(init["lvol_init"])), - _lvol(Rcpp::as(init["lvol"])), - _lvol_sig(Rcpp::as(init["lvol_sig"])) {} - SvInits(Rcpp::List& init, int num_design) + _lvol_init(CAST(init["lvol_init"])), + _lvol(CAST(init["lvol"])), + _lvol_sig(CAST(init["lvol_sig"])) {} + SvInits(LIST& init, int num_design) : RegInits(init), - _lvol_init(Rcpp::as(init["lvol_init"])), + _lvol_init(CAST(init["lvol_init"])), _lvol(_lvol_init.transpose().replicate(num_design, 1)), - _lvol_sig(Rcpp::as(init["lvol_sig"])) {} + _lvol_sig(CAST(init["lvol_sig"])) {} }; struct HierminnSvInits : public SvInits { @@ -284,13 +284,13 @@ struct HierminnSvInits : public SvInits { double _cross_lambda; double _contem_lambda; - HierminnSvInits(Rcpp::List& init) + HierminnSvInits(LIST& init) : SvInits(init), - _own_lambda(init["own_lambda"]), _cross_lambda(init["cross_lambda"]), _contem_lambda(init["contem_lambda"]) {} + _own_lambda(CAST_DOUBLE(init["own_lambda"])), 
_cross_lambda(CAST_DOUBLE(init["cross_lambda"])), _contem_lambda(CAST_DOUBLE(init["contem_lambda"])) {} - HierminnSvInits(Rcpp::List& init, int num_design) + HierminnSvInits(LIST& init, int num_design) : SvInits(init, num_design), - _own_lambda(init["own_lambda"]), _cross_lambda(init["cross_lambda"]), _contem_lambda(init["contem_lambda"]) {} + _own_lambda(CAST_DOUBLE(init["own_lambda"])), _cross_lambda(CAST_DOUBLE(init["cross_lambda"])), _contem_lambda(CAST_DOUBLE(init["contem_lambda"])) {} }; struct SsvsSvInits : public SvInits { @@ -300,21 +300,21 @@ struct SsvsSvInits : public SvInits { Eigen::VectorXd _coef_slab; Eigen::VectorXd _contem_slab; - SsvsSvInits(Rcpp::List& init) + SsvsSvInits(LIST& init) : SvInits(init), - _coef_dummy(Rcpp::as(init["init_coef_dummy"])), - _coef_weight(Rcpp::as(init["coef_mixture"])), - _contem_weight(Rcpp::as(init["chol_mixture"])), - _coef_slab(Rcpp::as(init["coef_slab"])), - _contem_slab(Rcpp::as(init["contem_slab"])) {} + _coef_dummy(CAST(init["init_coef_dummy"])), + _coef_weight(CAST(init["coef_mixture"])), + _contem_weight(CAST(init["chol_mixture"])), + _coef_slab(CAST(init["coef_slab"])), + _contem_slab(CAST(init["contem_slab"])) {} - SsvsSvInits(Rcpp::List& init, int num_design) + SsvsSvInits(LIST& init, int num_design) : SvInits(init, num_design), - _coef_dummy(Rcpp::as(init["init_coef_dummy"])), - _coef_weight(Rcpp::as(init["coef_mixture"])), - _contem_weight(Rcpp::as(init["chol_mixture"])), - _coef_slab(Rcpp::as(init["coef_slab"])), - _contem_slab(Rcpp::as(init["contem_slab"])) {} + _coef_dummy(CAST(init["init_coef_dummy"])), + _coef_weight(CAST(init["coef_mixture"])), + _contem_weight(CAST(init["chol_mixture"])), + _coef_slab(CAST(init["coef_slab"])), + _contem_slab(CAST(init["contem_slab"])) {} }; struct GlSvInits : public SvInits { @@ -323,46 +323,46 @@ struct GlSvInits : public SvInits { Eigen::VectorXd _init_contem_local; Eigen::VectorXd _init_conetm_global; - GlSvInits(Rcpp::List& init) + GlSvInits(LIST& init) : 
SvInits(init), - _init_local(Rcpp::as(init["local_sparsity"])), - _init_global(init["global_sparsity"]), - _init_contem_local(Rcpp::as(init["contem_local_sparsity"])), - _init_conetm_global(Rcpp::as(init["contem_global_sparsity"])) {} + _init_local(CAST(init["local_sparsity"])), + _init_global(CAST_DOUBLE(init["global_sparsity"])), + _init_contem_local(CAST(init["contem_local_sparsity"])), + _init_conetm_global(CAST(init["contem_global_sparsity"])) {} - GlSvInits(Rcpp::List& init, int num_design) + GlSvInits(LIST& init, int num_design) : SvInits(init, num_design), - _init_local(Rcpp::as(init["local_sparsity"])), - _init_global(init["global_sparsity"]), - _init_contem_local(Rcpp::as(init["contem_local_sparsity"])), - _init_conetm_global(Rcpp::as(init["contem_global_sparsity"])) {} + _init_local(CAST(init["local_sparsity"])), + _init_global(CAST_DOUBLE(init["global_sparsity"])), + _init_contem_local(CAST(init["contem_local_sparsity"])), + _init_conetm_global(CAST(init["contem_global_sparsity"])) {} }; struct HsSvInits : public GlSvInits { Eigen::VectorXd _init_group; - HsSvInits(Rcpp::List& init) + HsSvInits(LIST& init) : GlSvInits(init), - _init_group(Rcpp::as(init["group_sparsity"])) {} + _init_group(CAST(init["group_sparsity"])) {} - HsSvInits(Rcpp::List& init, int num_design) + HsSvInits(LIST& init, int num_design) : GlSvInits(init, num_design), - _init_group(Rcpp::as(init["group_sparsity"])) {} + _init_group(CAST(init["group_sparsity"])) {} }; struct NgSvInits : public HsSvInits { Eigen::VectorXd _init_local_shape; double _init_contem_shape; - NgSvInits(Rcpp::List& init) + NgSvInits(LIST& init) : HsSvInits(init), - _init_local_shape(Rcpp::as(init["local_shape"])), - _init_contem_shape(init["contem_shape"]) {} + _init_local_shape(CAST(init["local_shape"])), + _init_contem_shape(CAST_DOUBLE(init["contem_shape"])) {} - NgSvInits(Rcpp::List& init, int num_design) + NgSvInits(LIST& init, int num_design) : HsSvInits(init, num_design), - 
_init_local_shape(Rcpp::as(init["local_shape"])), - _init_contem_shape(init["contem_shape"]) {} + _init_local_shape(CAST(init["local_shape"])), + _init_contem_shape(CAST_DOUBLE(init["contem_shape"])) {} }; struct SvRecords : public RegRecords { @@ -445,7 +445,7 @@ class McmcSv { } virtual ~McmcSv() = default; virtual void doPosteriorDraws() = 0; - virtual Rcpp::List returnRecords(int num_burn, int thin) const = 0; + virtual LIST returnRecords(int num_burn, int thin) const = 0; SvRecords returnSvRecords(int num_burn, int thin, bool sparse = false) const { if (sparse) { Eigen::MatrixXd coef_record(num_iter + 1, num_coef); @@ -596,21 +596,21 @@ class MinnSv : public McmcSv { updateInitState(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = sv_record.coef_record.leftCols(num_alpha), - Rcpp::Named("h_record") = sv_record.lvol_record, - Rcpp::Named("a_record") = sv_record.contem_coef_record, - Rcpp::Named("h0_record") = sv_record.lvol_init_record, - Rcpp::Named("sigh_record") = sv_record.lvol_sig_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = sv_record.coef_record.leftCols(num_alpha), + NAMED("h_record") = sv_record.lvol_record, + NAMED("a_record") = sv_record.contem_coef_record, + NAMED("h0_record") = sv_record.lvol_init_record, + NAMED("sigh_record") = sv_record.lvol_sig_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = sv_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(sv_record.coef_record.rightCols(dim)); } for (auto& record : res) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + 
ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } return res; } @@ -679,21 +679,21 @@ class HierminnSv : public McmcSv { updateInitState(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = sv_record.coef_record.leftCols(num_alpha), - Rcpp::Named("h_record") = sv_record.lvol_record, - Rcpp::Named("a_record") = sv_record.contem_coef_record, - Rcpp::Named("h0_record") = sv_record.lvol_init_record, - Rcpp::Named("sigh_record") = sv_record.lvol_sig_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = sv_record.coef_record.leftCols(num_alpha), + NAMED("h_record") = sv_record.lvol_record, + NAMED("a_record") = sv_record.contem_coef_record, + NAMED("h0_record") = sv_record.lvol_init_record, + NAMED("sigh_record") = sv_record.lvol_sig_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = sv_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(sv_record.coef_record.rightCols(dim)); } for (auto& record : res) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } return res; } @@ -824,22 +824,22 @@ class SsvsSv : public McmcSv { updateInitState(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = sv_record.coef_record.leftCols(num_alpha), - Rcpp::Named("h_record") = sv_record.lvol_record, - Rcpp::Named("a_record") = sv_record.contem_coef_record, - 
Rcpp::Named("h0_record") = sv_record.lvol_init_record, - Rcpp::Named("sigh_record") = sv_record.lvol_sig_record, - Rcpp::Named("gamma_record") = ssvs_record.coef_dummy_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = sv_record.coef_record.leftCols(num_alpha), + NAMED("h_record") = sv_record.lvol_record, + NAMED("a_record") = sv_record.contem_coef_record, + NAMED("h0_record") = sv_record.lvol_init_record, + NAMED("sigh_record") = sv_record.lvol_sig_record, + NAMED("gamma_record") = ssvs_record.coef_dummy_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = sv_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(sv_record.coef_record.rightCols(dim)); } for (auto& record : res) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } return res; } @@ -955,28 +955,28 @@ class HorseshoeSv : public McmcSv { updateInitState(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = sv_record.coef_record.leftCols(num_alpha), - Rcpp::Named("h_record") = sv_record.lvol_record, - Rcpp::Named("a_record") = sv_record.contem_coef_record, - Rcpp::Named("h0_record") = sv_record.lvol_init_record, - Rcpp::Named("sigh_record") = sv_record.lvol_sig_record, - Rcpp::Named("lambda_record") = hs_record.local_record, - Rcpp::Named("eta_record") = hs_record.group_record, - Rcpp::Named("tau_record") = hs_record.global_record, - Rcpp::Named("kappa_record") = hs_record.shrink_record, - Rcpp::Named("alpha_sparse_record") = 
sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = sv_record.coef_record.leftCols(num_alpha), + NAMED("h_record") = sv_record.lvol_record, + NAMED("a_record") = sv_record.contem_coef_record, + NAMED("h0_record") = sv_record.lvol_init_record, + NAMED("sigh_record") = sv_record.lvol_sig_record, + NAMED("lambda_record") = hs_record.local_record, + NAMED("eta_record") = hs_record.group_record, + NAMED("tau_record") = hs_record.global_record, + NAMED("kappa_record") = hs_record.shrink_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = sv_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(sv_record.coef_record.rightCols(dim)); } for (auto& record : res) { - if (Rcpp::is(record)) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + if (IS_MATRIX(ACCESS_LIST(record, res))) { + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } else { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } // record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); } @@ -1105,27 +1105,27 @@ class NormalgammaSv : public McmcSv { updateInitState(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = sv_record.coef_record.leftCols(num_alpha), - Rcpp::Named("h_record") = sv_record.lvol_record, - Rcpp::Named("a_record") = sv_record.contem_coef_record, - Rcpp::Named("h0_record") = sv_record.lvol_init_record, - Rcpp::Named("sigh_record") = sv_record.lvol_sig_record, - Rcpp::Named("lambda_record") = 
ng_record.local_record, - Rcpp::Named("eta_record") = ng_record.group_record, - Rcpp::Named("tau_record") = ng_record.global_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = sv_record.coef_record.leftCols(num_alpha), + NAMED("h_record") = sv_record.lvol_record, + NAMED("a_record") = sv_record.contem_coef_record, + NAMED("h0_record") = sv_record.lvol_init_record, + NAMED("sigh_record") = sv_record.lvol_sig_record, + NAMED("lambda_record") = ng_record.local_record, + NAMED("eta_record") = ng_record.group_record, + NAMED("tau_record") = ng_record.global_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = sv_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(sv_record.coef_record.rightCols(dim)); } for (auto& record : res) { - if (Rcpp::is(record)) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + if (IS_MATRIX(ACCESS_LIST(record, res))) { + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } else { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } // record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); } @@ -1247,26 +1247,26 @@ class DirLaplaceSv : public McmcSv { updateInitState(); updateRecords(); } - Rcpp::List returnRecords(int num_burn, int thin) const override { - Rcpp::List res = Rcpp::List::create( - Rcpp::Named("alpha_record") = sv_record.coef_record.leftCols(num_alpha), - Rcpp::Named("h_record") = sv_record.lvol_record, - Rcpp::Named("a_record") = sv_record.contem_coef_record, - Rcpp::Named("h0_record") = 
sv_record.lvol_init_record, - Rcpp::Named("sigh_record") = sv_record.lvol_sig_record, - Rcpp::Named("lambda_record") = dl_record.local_record, - Rcpp::Named("tau_record") = dl_record.global_record, - Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, - Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record + LIST returnRecords(int num_burn, int thin) const override { + LIST res = CREATE_LIST( + NAMED("alpha_record") = sv_record.coef_record.leftCols(num_alpha), + NAMED("h_record") = sv_record.lvol_record, + NAMED("a_record") = sv_record.contem_coef_record, + NAMED("h0_record") = sv_record.lvol_init_record, + NAMED("sigh_record") = sv_record.lvol_sig_record, + NAMED("lambda_record") = dl_record.local_record, + NAMED("tau_record") = dl_record.global_record, + NAMED("alpha_sparse_record") = sparse_record.coef_record, + NAMED("a_sparse_record") = sparse_record.contem_coef_record ); if (include_mean) { - res["c_record"] = sv_record.coef_record.rightCols(dim); + res["c_record"] = CAST_MATRIX(sv_record.coef_record.rightCols(dim)); } for (auto& record : res) { - if (Rcpp::is(record)) { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + if (IS_MATRIX(ACCESS_LIST(record, res))) { + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } else { - record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); + ACCESS_LIST(record, res) = thin_record(CAST(ACCESS_LIST(record, res)), num_iter, num_burn, thin); } } return res; diff --git a/inst/include/ols.h b/inst/include/ols.h index 205370b2..8eb6443d 100644 --- a/inst/include/ols.h +++ b/inst/include/ols.h @@ -65,19 +65,19 @@ class MultiOls { void estimateCov() { cov = resid.transpose() * resid / (num_design - dim_design); } - Rcpp::List returnOlsRes() { + LIST returnOlsRes() { estimateCoef(); fitObs(); estimateCov(); - return Rcpp::List::create( - Rcpp::Named("coefficients") = coef, - Rcpp::Named("fitted.values") = yhat, - 
Rcpp::Named("residuals") = resid, - Rcpp::Named("covmat") = cov, - Rcpp::Named("df") = dim_design, - Rcpp::Named("m") = dim, - Rcpp::Named("obs") = num_design, - Rcpp::Named("y0") = response + return CREATE_LIST( + NAMED("coefficients") = coef, + NAMED("fitted.values") = yhat, + NAMED("residuals") = resid, + NAMED("covmat") = cov, + NAMED("df") = dim_design, + NAMED("m") = dim, + NAMED("obs") = num_design, + NAMED("y0") = response ); } OlsFit returnOlsFit(int ord) { @@ -158,8 +158,8 @@ class OlsVar { } } virtual ~OlsVar() = default; - Rcpp::List returnOlsRes() { - Rcpp::List ols_res = _ols->returnOlsRes(); + LIST returnOlsRes() { + LIST ols_res = _ols->returnOlsRes(); ols_res["p"] = lag; ols_res["totobs"] = data.rows(); ols_res["process"] = "VAR"; @@ -206,8 +206,8 @@ class OlsVhar { } } virtual ~OlsVhar() = default; - Rcpp::List returnOlsRes() { - Rcpp::List ols_res = _ols->returnOlsRes(); + LIST returnOlsRes() { + LIST ols_res = _ols->returnOlsRes(); ols_res["p"] = 3; ols_res["week"] = week; ols_res["month"] = month; diff --git a/python/.gitignore b/python/.gitignore new file mode 100644 index 00000000..0ec2b31b --- /dev/null +++ b/python/.gitignore @@ -0,0 +1,165 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +!docs/ +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +/.luarc.json diff --git a/python/CODE_OF_CONDUCT.md b/python/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..67fe8cee --- /dev/null +++ b/python/CODE_OF_CONDUCT.md @@ -0,0 +1,132 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official email address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +[INSERT CONTACT METHOD]. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/python/LICENSE b/python/LICENSE new file mode 100644 index 00000000..65b68442 --- /dev/null +++ b/python/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) 2024 Young Geun Kim + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + bvhar Copyright (C) 2024 Young Geun Kim + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/python/README.md b/python/README.md new file mode 100644 index 00000000..d492c08f --- /dev/null +++ b/python/README.md @@ -0,0 +1,49 @@ + + + +# bvhar + + + +[![py-conda-check](https://github.com/ygeunkim/bvhar/actions/workflows/py-conda-check.yaml/badge.svg?branch=feature%2Fpython)](https://github.com/ygeunkim/bvhar/actions/workflows/py-conda-check.yaml?query=branch%3Afeature%2Fpython) +[![py-pip-check](https://github.com/ygeunkim/bvhar/actions/workflows/py-pip-check.yaml/badge.svg?branch=feature%2Fpython)](https://github.com/ygeunkim/bvhar/actions/workflows/py-pip-check.yaml?query=branch%3Afeature%2Fpython) +[![py-wheel-check](https://github.com/ygeunkim/bvhar/actions/workflows/py-wheel-check.yaml/badge.svg?branch=feature%2Fpython)](https://github.com/ygeunkim/bvhar/actions/workflows/py-wheel-check.yaml?query=branch%3Afeature%2Fpython) +[![Codecov test
coverage](https://codecov.io/gh/ygeunkim/bvhar/branch/feature%2Fpython/graph/badge.svg?flag=python)](https://app.codecov.io/gh/ygeunkim/bvhar)
+![Python
Versions](https://img.shields.io/badge/python-3.10%20%7C%203.11%20%7C%203.12-blue)


This is the `bvhar` package for Python. We aim to add every feature of the R
package.

| Model | Class | prior |
|:-----:|:-----------:|:----------------------------------:|
| VAR | `VarOls` | |
| VHAR | `VharOls` | |
| BVAR | `VarBayes` | SSVS, Horseshoe, Minnesota, NG, DL |
| BVHAR | `VharBayes` | SSVS, Horseshoe, Minnesota, NG, DL |

## Installation

From GitHub (`feature/python` branch at this stage):

``` bash
python -m pip install 'git+https://github.com/ygeunkim/bvhar.git@feature/python#egg=bvhar&subdirectory=python'
```

### Eigen and boost usage

`bvhar` uses Eigen and boost headers in C++ sources. If you use `eigen`
and `boost-cpp` in conda with the conda environment, you don’t have to
take care of them. 
 +

``` bash
conda env create -f requirements/environment.yml
conda activate bvhar-env
```

Otherwise, you should set their paths by yourself; at this stage.

- `EIGEN_INCLUDE_DIR`: Eigen path that includes the eigen headers
- `BOOST_INCLUDE_DIR`: boost path that includes the boost headers diff --git a/python/README.qmd b/python/README.qmd new file mode 100644 index 00000000..24da27d3 --- /dev/null +++ b/python/README.qmd @@ -0,0 +1,49 @@ +--- +format: gfm +keep-ipynb: false +--- + + + +# bvhar + + +[![py-conda-check](https://github.com/ygeunkim/bvhar/actions/workflows/py-conda-check.yaml/badge.svg?branch=feature%2Fpython)](https://github.com/ygeunkim/bvhar/actions/workflows/py-conda-check.yaml?query=branch%3Afeature%2Fpython) +[![py-pip-check](https://github.com/ygeunkim/bvhar/actions/workflows/py-pip-check.yaml/badge.svg?branch=feature%2Fpython)](https://github.com/ygeunkim/bvhar/actions/workflows/py-pip-check.yaml?query=branch%3Afeature%2Fpython) +[![py-wheel-check](https://github.com/ygeunkim/bvhar/actions/workflows/py-wheel-check.yaml/badge.svg?branch=feature%2Fpython)](https://github.com/ygeunkim/bvhar/actions/workflows/py-wheel-check.yaml?query=branch%3Afeature%2Fpython) +[![Codecov test coverage](https://codecov.io/gh/ygeunkim/bvhar/branch/feature%2Fpython/graph/badge.svg?flag=python)](https://app.codecov.io/gh/ygeunkim/bvhar) +![Python Versions](https://img.shields.io/badge/python-3.10%20%7C%203.11%20%7C%203.12-blue) + + +This is the `bvhar` package for Python. We aim to add every feature of the R package. 
+ +| Model | Class | prior | +|:-----:|:--------:|:-----:| +| VAR | `VarOls` | | +| VHAR | `VharOls` | | +| BVAR | `VarBayes` | SSVS, Horseshoe, Minnesota, NG, DL | +| BVHAR | `VharBayes` | SSVS, Horseshoe, Minnesota, NG, DL | + +## Installation + +From GitHub (`feature/python` branch at this stage): + +```bash +python -m pip install 'git+https://github.com/ygeunkim/bvhar.git@feature/python#egg=bvhar&subdirectory=python' +``` + +### Eigen and boost usage + +`bvhar` uses Eigen and boost headers in C++ sources. +If you use `eigen` and `boost-cpp` in conda with the conda environment, you don't have to take care of them. + +```bash +conda env create -f requirements/environment.yml +conda activate bvhar-env +``` + +Otherwise, you should set their paths by yourself; at this stage. + +- `EIGEN_INCLUDE_DIR`: Eigen path that includes the eigen headers +- `BOOST_INCLUDE_DIR`: boost path that includes the boost headers + diff --git a/python/docs/.gitignore b/python/docs/.gitignore new file mode 100644 index 00000000..14723d8a --- /dev/null +++ b/python/docs/.gitignore @@ -0,0 +1,5 @@ +/.quarto/ +_site/ +objects.json +reference/ +src/ diff --git a/python/docs/_extensions/machow/interlinks/.gitignore b/python/docs/_extensions/machow/interlinks/.gitignore new file mode 100644 index 00000000..5a1bf0b4 --- /dev/null +++ b/python/docs/_extensions/machow/interlinks/.gitignore @@ -0,0 +1,3 @@ +*.html +*.pdf +*_files/ diff --git a/python/docs/_extensions/machow/interlinks/_extension.yml b/python/docs/_extensions/machow/interlinks/_extension.yml new file mode 100644 index 00000000..c8a81213 --- /dev/null +++ b/python/docs/_extensions/machow/interlinks/_extension.yml @@ -0,0 +1,7 @@ +title: Interlinks +author: Michael Chow +version: 1.1.0 +quarto-required: ">=1.2.0" +contributes: + filters: + - interlinks.lua diff --git a/python/docs/_extensions/machow/interlinks/interlinks.lua b/python/docs/_extensions/machow/interlinks/interlinks.lua new file mode 100644 index 00000000..47aa61fa --- 
/dev/null +++ b/python/docs/_extensions/machow/interlinks/interlinks.lua @@ -0,0 +1,254 @@ +local function read_inv_text(filename) + -- read file + local file = io.open(filename, "r") + if file == nil then + return nil + end + local str = file:read("a") + file:close() + + + local project = str:match("# Project: (%S+)") + local version = str:match("# Version: (%S+)") + + local data = {project = project, version = version, items = {}} + + local ptn_data = + "^" .. + "(.-)%s+" .. -- name + "([%S:]-):" .. -- domain + "([%S]+)%s+" .. -- role + "(%-?%d+)%s+" .. -- priority + "(%S*)%s+" .. -- uri + "(.-)\r?$" -- dispname + + + -- Iterate through each line in the file content + for line in str:gmatch("[^\r\n]+") do + if not line:match("^#") then + -- Match each line against the pattern + local name, domain, role, priority, uri, dispName = line:match(ptn_data) + + -- if name is nil, raise an error + if name == nil then + error("Error parsing line: " .. line) + end + + data.items[#data.items + 1] = { + name = name, + domain = domain, + role = role, + priority = priority, + uri = uri, + dispName = dispName + } + end + end + return data +end + +local function read_json(filename) + + local file = io.open(filename, "r") + if file == nil then + return nil + end + local str = file:read("a") + file:close() + + local decoded = quarto.json.decode(str) + return decoded +end + +local function read_inv_text_or_json(base_name) + local file = io.open(base_name .. ".txt", "r") + if file then + -- TODO: refactors so we don't just close the file immediately + io.close(file) + json = read_inv_text(base_name .. ".txt") + + else + json = read_json(base_name .. ".json") + end + + return json +end + +local inventory = {} + +local function lookup(search_object) + + local results = {} + for _, inv in ipairs(inventory) do + for _, item in ipairs(inv.items) do + -- e.g. 
:external+:::`` + if item.inv_name and item.inv_name ~= search_object.inv_name then + goto continue + end + + if item.name ~= search_object.name then + goto continue + end + + if search_object.role and item.role ~= search_object.role then + goto continue + end + + if search_object.domain and item.domain ~= search_object.domain then + goto continue + else + if search_object.domain or item.domain == "py" then + table.insert(results, item) + end + + goto continue + end + + ::continue:: + end + end + + if #results == 1 then + return results[1] + end + if #results > 1 then + quarto.log.warning("Found multiple matches for " .. search_object.name .. ", using the first match.") + return results[1] + end + if #results == 0 then + quarto.log.warning("Found no matches for object:\n", search_object) + end + + return nil +end + +local function mysplit (inputstr, sep) + if sep == nil then + sep = "%s" + end + local t={} + for str in string.gmatch(inputstr, "([^"..sep.."]+)") do + table.insert(t, str) + end + return t +end + +local function normalize_role(role) + if role == "func" then + return "function" + end + return role +end + +local function build_search_object(str) + local starts_with_colon = str:sub(1, 1) == ":" + local search = {} + if starts_with_colon then + local t = mysplit(str, ":") + if #t == 2 then + -- e.g. :py:func:`my_func` + search.role = normalize_role(t[1]) + search.name = t[2]:match("%%60(.*)%%60") + elseif #t == 3 then + -- e.g. :py:func:`my_func` + search.domain = t[1] + search.role = normalize_role(t[2]) + search.name = t[3]:match("%%60(.*)%%60") + elseif #t == 4 then + -- e.g. :ext+inv:py:func:`my_func` + search.external = true + + search.inv_name = t[1]:match("external%+(.*)") + search.domain = t[2] + search.role = normalize_role(t[3]) + search.name = t[4]:match("%%60(.*)%%60") + else + quarto.log.warning("couldn't parse this link: " .. 
str) + return {} + end + else + search.name = str:match("%%60(.*)%%60") + end + + if search.name == nil then + quarto.log.warning("couldn't parse this link: " .. str) + return {} + end + + if search.name:sub(1, 1) == "~" then + search.shortened = true + search.name = search.name:sub(2, -1) + end + return search +end + +local function report_broken_link(link, search_object, replacement) + -- TODO: how to unescape html elements like [? + return pandoc.Code(pandoc.utils.stringify(link.content)) +end + +function Link(link) + -- do not process regular links ---- + if not link.target:match("%%60") then + return link + end + + -- lookup item ---- + local search = build_search_object(link.target) + local item = lookup(search) + + -- determine replacement, used if no link text specified ---- + local original_text = pandoc.utils.stringify(link.content) + local replacement = search.name + if search.shortened then + local t = mysplit(search.name, ".") + replacement = t[#t] + end + + -- set link text ---- + if original_text == "" and replacement ~= nil then + link.content = pandoc.Code(replacement) + end + + -- report broken links ---- + if item == nil then + return report_broken_link(link, search) + end + link.target = item.uri:gsub("%$$", search.name) + + + return link +end + +local function fixup_json(json, prefix) + for _, item in ipairs(json.items) do + item.uri = prefix .. item.uri + end + table.insert(inventory, json) +end + +return { + { + Meta = function(meta) + local json + local prefix + if meta.interlinks and meta.interlinks.sources then + for k, v in pairs(meta.interlinks.sources) do + local base_name = quarto.project.offset .. "/_inv/" .. k .. "_objects" + json = read_inv_text_or_json(base_name) + prefix = pandoc.utils.stringify(v.url) + if json ~= nil then + fixup_json(json, prefix) + end + end + end + json = read_inv_text_or_json(quarto.project.offset .. 
"/objects") + if json ~= nil then + fixup_json(json, "/") + end + end + }, + { + Link = Link + } +} diff --git a/python/docs/_quarto.yml b/python/docs/_quarto.yml new file mode 100644 index 00000000..02f4f3bf --- /dev/null +++ b/python/docs/_quarto.yml @@ -0,0 +1,107 @@ +project: + type: website + output-dir: _site + +website: + title: "bvhar for Python" + description: "Bayesian multivariate time series modeling" + page-navigation: true + favicon: "favicon.ico" + # page-footer: + navbar: + background: light + pinned: true + logo: "logo.png" + left: + - text: "Get started" + file: get_started.qmd + - text: "Reference" + file: reference/index.qmd + - text: Vignettes + menu: + - text: "bvhar" + file: intro.qmd + - text: "bvhar in R" + href: https://ygeunkim.github.io/package/bvhar/ + target: _blank + right: + - icon: github + href: https://github.com/ygeunkim/bvhar/tree/feature/python/python + aria-label: bvhar-python github + sidebar: + style: floating + collapse-level: 1 + +format: + html: + sidebar: false + theme: + light: [journal, mypkgdown.scss] + highlight-style: github-dark + css: mypkgdown.css + code-background: true + code-copy: true + code-overflow: wrap + toc: true + +include-in-header: + - text: | + + +quartodoc: + title: Reference + package: bvhar + sections: + - title: VAR + desc: "Vector autoregressive model" + package: bvhar.model + contents: + - VarOls + - VarBayes + - title: VHAR + desc: "Vector heterogeneous autoregressive model" + package: bvhar.model + contents: + - VharOls + - VharBayes + - title: Priors + desc: "Prior configuration" + package: bvhar.model + contents: + - SsvsConfig + - HorseshoeConfig + - MinnesotaConfig + - LambdaConfig + - NgConfig + - DlConfig + - LdltConfig + - SvConfig + - InterceptConfig + - title: Random + desc: "Random generation functions" + package: bvhar.random + contents: + - normal.generate_mnormal + - normal.generate_gig + - title: Datasets + desc: "Datasets" + package: bvhar.datasets + contents: + - load_vix + - 
title: Utility functions + desc: "Related to configuration" + package: bvhar.utils + contents: + - checkomp.is_omp + - checkomp.check_omp + +filters: + - interlinks + +interlinks: + sources: {} + +jupyter: python3 +execute: + keep-ipynb: false + # execute-dir: project \ No newline at end of file diff --git a/python/docs/favicon.ico b/python/docs/favicon.ico new file mode 100644 index 00000000..35dac086 Binary files /dev/null and b/python/docs/favicon.ico differ diff --git a/python/docs/get_started.qmd b/python/docs/get_started.qmd new file mode 100644 index 00000000..6fb417ec --- /dev/null +++ b/python/docs/get_started.qmd @@ -0,0 +1,103 @@ +# bvhar Getting Started + +To install `bvhar` in non-conda environment, you should have Eigen and boost libraries in system. + +## Installing + +### Eigen and boost + +In Linux, + +```bash +sudo apt-get update +sudo apt-get install -y libboost-all-dev libeigen3-dev +echo "export EIGEN_INCLUDE_DIR=/usr/include/eigen3" >> ~/.bashrc +echo "export BOOST_INCLUDE_DIR=/usr/include/boost" >> ~/.bashrc +source ~/.bashrc +``` + +In macOS, + +```zsh +brew update +brew install boost eigen +EIGEN_LOC=$(brew --prefix eigen) +BOOST_LOC=$(brew --prefix boost) +echo "export EIGEN_INCLUDE_DIR=$(brew --prefix eigen)/include/eigen3" >> ~/.zshrc +echo "export BOOST_INCLUDE_DIR=$(brew --prefix boost)/include" >> ~/.zshrc +source ~/.zshrc +``` + +In both Linux and macOS, verify the installation with + +```bash +ls $EIGEN_INCLUDE_DIR +ls $BOOST_INCLUDE_DIR +``` + +For Windows, you can easily install both libraries using Chocolatey: + +```powershell +choco install eigen -y +choco install boost-msvc-14.3 -y +``` + +Set the environment variables: + +```powershell +$eigenPath = ( + Get-ChildItem -Path "C:\ProgramData\chocolatey\lib\eigen" -Recurse -Filter "Eigen" | + Select-Object -First 1 +).Parent.FullName +$boostPath = $null +$boostCand = @("C:\local", "C:\ProgramData\chocolatey\lib") +foreach ($cand in $boostCand) { + $isPath = ( + Get-ChildItem -Path 
$cand -Directory | + Where-Object { $_.Name -match "boost" } | + Sort-Object LastWriteTime -Descending | + Select-Object -First 1 + ).FullName + if ($isPath) { + $boostPath = $isPath + break + } +} +[System.Environment]::SetEnvironmentVariable("EIGEN_INCLUDE_DIR", $eigenPath, [System.EnvironmentVariableTarget]::Machine) +[System.Environment]::SetEnvironmentVariable("BOOST_INCLUDE_DIR", $boostPath, [System.EnvironmentVariableTarget]::Machine) +``` + +Verify the environment variables and installation: + +```powershell +dir $env:EIGEN_INCLUDE_DIR +dir $env:BOOST_INCLUDE_DIR +``` + +## OpenMP + +OpenMP multithreading is used when conducting parallel chains MCMC. If OpenMP is not enabled, the chains are run sequentially. `bvhar` provides OpenMP checking functions. + +```{python} +from bvhar.utils import checkomp +checkomp.check_omp() +``` + +`True` if enabled, `False` if not: + +```{python} +checkomp.is_omp() +``` + +In macOS, you need additional step to enable OpenMP. +There are many options you can consider. +Here is an example with LLVM. + +```zsh +brew update +brew install llvm libomp +echo "export CC=$(brew --prefix llvm)/bin/clang" >> ~/.zshrc +echo "export CXX=$(brew --prefix llvm)/bin/clang++" >> ~/.zshrc +echo "export CPPFLAGS=-I$(brew --prefix llvm)/include -I$(brew --prefix libomp)/include" >> ~/.zshrc +echo "export LDFLAGS=-L$(brew --prefix llvm)/lib -L$(brew --prefix libomp)/lib" >> ~/.zshrc +``` diff --git a/python/docs/index.qmd b/python/docs/index.qmd new file mode 100644 index 00000000..ba9c6c9b --- /dev/null +++ b/python/docs/index.qmd @@ -0,0 +1,44 @@ +--- +toc: false +--- + +# bvhar + +This is a Python version for [`bvhar`](https://cloud.r-project.org/web/packages/bvhar/index.html) package. + +The package implements C++ headers made in R package. +Learn more about [`bvhar for R`](https://ygeunkim.github.io/package/bvhar/index.html). + +::: {.callout-note} +`bvhar` for Python is not ready to use. This page is rendered just for testing. 
 +
:::

## Eigen and boost setting

The `bvhar` C++ sources use

- [`Eigen`](https://eigen.tuxfamily.org/index.php?title=Main_Page) library: for matrix computation
- [`boost`](https://www.boost.org) library: for random generation.

If you use `conda`, you can avoid manual setting of these libraries.
Check `requirements/environment.yml` in our repo.
Since conda provides [`eigen`](https://anaconda.org/conda-forge/eigen) and [`boost-cpp`](https://anaconda.org/conda-forge/boost-cpp), you can easily install them.

Otherwise, you should set their paths by yourself; at this stage.

- `EIGEN_INCLUDE_DIR`: Eigen path that includes the eigen headers
- `BOOST_INCLUDE_DIR`: boost path that includes the boost headers

## Installation

From GitHub:

```bash
python -m pip install 'git+https://github.com/ygeunkim/bvhar.git@feature/python#egg=bvhar&subdirectory=python'
```

## Usage

You can see the development status in PR [#10](https://github.com/ygeunkim/bvhar/pull/10).

 diff --git a/python/docs/intro.qmd b/python/docs/intro.qmd new file mode 100644 index 00000000..2345c505 --- /dev/null +++ b/python/docs/intro.qmd @@ -0,0 +1,41 @@ +# Introduction to bvhar + +```{python} +#| include: false +import pandas as pd +pd.options.display.max_rows = 25 +``` + +## Data + +This package includes the same CBOE ETF volatility index series. 
+ +```{python} +from bvhar.datasets import load_vix +etf_vix = load_vix() +``` + +```{python} +#| echo: false +etf_vix +``` + +# Models + +```{python} +from bvhar.model import VarOls, VharOls +``` + +## VAR + +```{python} +fit_var = VarOls(etf_vix, 1, True) +fit_var.fit() +``` + +## VHAR + +## BVAR + +## BVHAR + diff --git a/python/docs/logo.png b/python/docs/logo.png new file mode 100644 index 00000000..6721f48a Binary files /dev/null and b/python/docs/logo.png differ diff --git a/python/docs/mypkgdown.css b/python/docs/mypkgdown.css new file mode 100644 index 00000000..adf08a90 --- /dev/null +++ b/python/docs/mypkgdown.css @@ -0,0 +1,38 @@ +body { + background-color: #272935; + color: #f8f8f2; +} + +a { + color: #ff3860; +} + +.navbar { + background-color: #23252f; +} + +code { + color: #f8f8f2; + background-color: #44475a; +} + +/* Ensure other elements follow the dark theme */ +.sidebar, .toc-content { + background-color: #272935; + color: #f8f8f2; +} + +/* Adjust link colors for better visibility in dark mode */ +a:hover { + color: #ff6b8b; +} + +/* .title-block { + display: inline-block; +} + +.title-logo { + float: right; + height: 138px; + margin-top: -60px; +} */ \ No newline at end of file diff --git a/python/docs/mypkgdown.scss b/python/docs/mypkgdown.scss new file mode 100644 index 00000000..534f45d3 --- /dev/null +++ b/python/docs/mypkgdown.scss @@ -0,0 +1,34 @@ +/*-- scss:defaults --*/ +$font-family-sans-serif: "Noto Sans", sans-serif !default; +$font-family-monospace: "Anonymous Pro", monospace !default; + +$body-bg: #272935 !default; +$body-color: #f8f8f2 !default; +$link-color: #ff3860 !default; + +$navbar-bg: #23252f !default; +$navbar-fg: #f8f8f2 !default; + +$code-block-bg: #44475a !default; +$code-color: #f8f8f2 !default; + +/*-- scss:rules --*/ +body { + background-color: $body-bg; + color: $body-color; +} + +.navbar { + background-color: $navbar-bg; + color: $navbar-fg; +} + +pre { + background-color: $code-block-bg; + color: $code-color; +} 
+ +code { + color: $code-color; + background-color: $code-block-bg; +} \ No newline at end of file diff --git a/python/pyproject.toml b/python/pyproject.toml new file mode 100644 index 00000000..931de2cd --- /dev/null +++ b/python/pyproject.toml @@ -0,0 +1,76 @@ +[project] +name = "bvhar" +version = "0.0.0.9000" +description = "Bayesian multivariate time series modeling" +readme = "README.md" +authors = [ + { name = "Young Geun Kim", email = "ygeunkimstat@gmail.com" } +] +keywords = [ + "bayesian", + "time series" +] +dependencies = [ + "pybind11", + "numpy", + "pandas" +] +classifiers=[ + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Programming Language :: C++", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Scientific/Engineering :: Mathematics" +] +requires-python = ">=3.10" +license = {file = "LICENSE"} + +[project.urls] +"Homepage" = "https://github.com/ygeunkim/bvhar/tree/feature/python" +"Bug Reports" = "https://github.com/ygeunkim/bvhar/issues" +"Source" = "https://github.com/ygeunkim/bvhar/tree/feature/python" + +[tool.setuptools.packages.find] +where = ["src"] +include = ["bvhar", "bvhar.*"] + +[tool.setuptools.package-data] +bvhar = [ + "**/*.cpp", + "datasets/data/*.csv" +] + +[tool.setuptools_scm] + +[project.optional-dependencies] +doc = [ + "ipykernel", + "ipython<=8.12", + "nbclient", + "nbformat", + "quartodoc" +] +dev = [ + "pytest", + "jupyter", + "quartodoc" +] + +[build-system] +requires = [ + "setuptools>=42", + "wheel", + "pybind11" +] +build-backend = "setuptools.build_meta" + +[tool.pytest.ini_options] 
+testpaths = ["tests"] \ No newline at end of file diff --git a/python/requirements/environment-dev.yml b/python/requirements/environment-dev.yml new file mode 100644 index 00000000..945b5fce --- /dev/null +++ b/python/requirements/environment-dev.yml @@ -0,0 +1,16 @@ +name: bvhar-dev +channels: + - conda-forge +dependencies: + - python>=3.10 + - boost-cpp>=1.84.0 + - eigen>=3.4.0 + - pybind11 + - numpy + - pandas + - pip + - pytest + - pytest-cov + - jupyter + - pip: + - quartodoc diff --git a/python/requirements/environment.yml b/python/requirements/environment.yml new file mode 100644 index 00000000..c96e123d --- /dev/null +++ b/python/requirements/environment.yml @@ -0,0 +1,14 @@ +name: bvhar-env +channels: + - conda-forge +dependencies: + - python>=3.10 + - boost-cpp>=1.84.0 + - eigen>=3.4.0 + - pybind11 + - numpy + - pandas + - pip + - pytest + - pip: + - quartodoc diff --git a/python/requirements/requirements-dev.txt b/python/requirements/requirements-dev.txt new file mode 100644 index 00000000..5cd68e6d --- /dev/null +++ b/python/requirements/requirements-dev.txt @@ -0,0 +1,6 @@ +pybind11 +numpy +pandas +pytest +jupyter +quartodoc \ No newline at end of file diff --git a/python/requirements/requirements.txt b/python/requirements/requirements.txt new file mode 100644 index 00000000..7a842f1c --- /dev/null +++ b/python/requirements/requirements.txt @@ -0,0 +1,4 @@ +pybind11 +numpy +pandas +pytest \ No newline at end of file diff --git a/python/setup.py b/python/setup.py new file mode 100644 index 00000000..969e8bb7 --- /dev/null +++ b/python/setup.py @@ -0,0 +1,148 @@ +from setuptools import setup, find_packages +from setuptools.command.build_ext import build_ext as _build_ext +import sys +import os +from pybind11.setup_helpers import Pybind11Extension +import tempfile + +with open("README.md", "r") as fh: + long_description = fh.read() + +# include_path = os.path.abspath('../inst/include') +include_path = os.path.abspath( + os.path.join( + 
os.path.dirname(__file__), + '..', 'inst', 'include' + ) +) + +class HeaderInclude(object): + def __init__(self, lib: str): + self.lib = lib + + def __str__(self): + conda_prefix = sys.prefix + print(f"Current environment path: {conda_prefix}") + if os.path.exists(os.path.join(conda_prefix, 'conda-meta')): + if sys.platform.startswith('win'): + self.lib = '' if self.lib == 'boost' else self.lib # should use include/ in windows-conda + lib_path = os.path.join(conda_prefix, 'Library', 'include', self.lib) + else: + lib_path = os.path.join(conda_prefix, 'include', self.lib) + if os.path.exists(lib_path): + print(f"Use {lib_path} for {self.lib} header") + return lib_path + else: + print(f"No {self.lib} in conda environment") + _lib = self.lib.rstrip('0123456789$').upper() + lib_dir = os.environ.get(f"{_lib}_INCLUDE_DIR") + if lib_dir: + # lib_path = os.path.join(lib_dir, 'include', self.lib) + lib_path = lib_dir + if os.path.exists(lib_path): + return lib_path + else: + raise RuntimeError(f"No {self.lib} found in {_lib}_INCLUDE_DIR") + else: + raise RuntimeError(f"Use conda or set {_lib}_INCLUDE_DIR environment variable") + +class BuildExt(_build_ext): + def has_flags(self, compiler, flag): + with tempfile.NamedTemporaryFile('w', suffix='.cpp', delete=False) as f: + f.write("int main() { return 0; }") + temp_file = f.name + f.close() + try: + compiler.compile([temp_file], extra_postargs=[flag]) + print(f"Use {flag} flag") + return True + except Exception as e: + print(f"Flag {flag} not supported by the compiler: {e}") + return False + finally: + if os.path.exists(temp_file): + os.unlink(temp_file) + + def build_extensions(self): + compile_args = [] + link_args = [] + if sys.platform.startswith('win'): + if self.has_flags(self.compiler, '/openmp'): + compile_args.append('/openmp') + else: + if self.has_flags(self.compiler, '-fopenmp'): + compile_args.append('-fopenmp') + link_args.append('-fopenmp') + for ext in self.extensions: + ext.extra_compile_args += compile_args 
+ ext.extra_link_args += link_args + _build_ext.build_extensions(self) + +def find_module(base_dir): + extensions = [] + is_src = os.path.basename(base_dir) == 'src' + for root, dirs, files in os.walk(base_dir): + for cpp_file in files: + if cpp_file.endswith('.cpp'): + rel_path = os.path.relpath(root, base_dir) + module_name = os.path.splitext(cpp_file)[0] + if is_src: + rel_path = rel_path.replace('bvhar', '').strip(os.path.sep) + # module_name = f'bvhar.{rel_path.replace(os.path.sep, ".")}' if rel_path != "." else base_dir + module_name = f"bvhar.{rel_path.replace(os.path.sep, '.')}.{module_name}" if rel_path != "" else f"{base_dir}.{module_name}" + else: + module_name = f"{base_dir}.{rel_path.replace(os.path.sep, '.')}.{module_name}" if rel_path != "." else f"{base_dir}.{module_name}" + extensions.append( + Pybind11Extension( + module_name, + sources=[os.path.join(root, cpp_file)], + define_macros=[ + ('EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS', None), + ('BOOST_DISABLE_ASSERTS', None) + ], + include_dirs=[ + include_path, + str(HeaderInclude('eigen3')), + str(HeaderInclude('boost')) + ] + ) + ) + return extensions + +setup( + name='bvhar', + version='0.0.0.9000', + packages=find_packages(where='src'), + package_dir={'': 'src'}, + description='Bayesian multivariate time series modeling', + url='https://github.com/ygeunkim/bvhar/tree/feature/python', + long_description=long_description, + long_description_content_type='text/markdown', + author='Young Geun Kim', + author_email='ygeunkimstat@gmail.com', + keywords=[ + 'bayesian', + 'time series' + ], + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', + 'Programming Language :: C++', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 
'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: Implementation :: CPython', + 'Topic :: Scientific/Engineering :: Mathematics' + ], + install_requires=[ + 'pybind11', + 'numpy', + 'pandas' + ], + ext_modules=find_module('src'), + cmdclass={'build_ext': BuildExt} +) diff --git a/python/src/bvhar/__init__.py b/python/src/bvhar/__init__.py new file mode 100644 index 00000000..a6b15887 --- /dev/null +++ b/python/src/bvhar/__init__.py @@ -0,0 +1,6 @@ +import os +from .utils.checkomp import is_omp + +if is_omp(): + if 'KMP_DUPLICATE_LIB_OK' not in os.environ: + os.environ['KMP_DUPLICATE_LIB_OK']='True' \ No newline at end of file diff --git a/python/src/bvhar/_src/__init__.py b/python/src/bvhar/_src/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/src/bvhar/_src/_design.cpp b/python/src/bvhar/_src/_design.cpp new file mode 100644 index 00000000..7d99d83b --- /dev/null +++ b/python/src/bvhar/_src/_design.cpp @@ -0,0 +1,23 @@ +#include +#include +#include + +namespace py = pybind11; + +Eigen::MatrixXd build_response(Eigen::Ref y, int var_lag, int index) { + return bvhar::build_y0(y, var_lag, index); +} + +Eigen::MatrixXd build_design(Eigen::Ref y, int var_lag, bool include_mean) { + return bvhar::build_x0(y, var_lag, include_mean); +} + +Eigen::MatrixXd build_design(Eigen::Ref y, int week, int month, bool include_mean) { + return bvhar::build_x0(y, month, include_mean) * bvhar::build_vhar(y.cols(), week, month, include_mean).transpose(); +} + +PYBIND11_MODULE(_design, m) { + m.def("build_response", &build_response, "Build response matrix"); + m.def("build_design", py::overload_cast, int, bool>(&build_design), "Build design matrix"); + m.def("build_design", py::overload_cast, int, int, bool>(&build_design), "Build VHAR design matrix"); +} diff --git a/python/src/bvhar/_src/_ldlt.cpp b/python/src/bvhar/_src/_ldlt.cpp new file mode 100644 index 00000000..f9fd754a --- /dev/null +++ b/python/src/bvhar/_src/_ldlt.cpp @@ 
-0,0 +1,160 @@ +#include +#include +#include + +class McmcLdlt { +public: + McmcLdlt( + int num_chains, int num_iter, int num_burn, int thin, + const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, + py::dict& param_reg, py::dict& param_prior, py::dict& param_intercept, + std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, + const Eigen::MatrixXi& grp_mat, + bool include_mean, const Eigen::VectorXi& seed_chain, + bool display_progress, int nthreads + ) + : num_chains(num_chains), num_iter(num_iter), num_burn(num_burn), thin(thin), nthreads(nthreads), + display_progress(display_progress), sur_objs(num_chains), res(num_chains) { + switch (prior_type) { + case 1: { + bvhar::MinnParams minn_params( + num_iter, x, y, + param_reg, param_prior, + param_intercept, include_mean + ); + for (int i = 0; i < num_chains; i++ ) { + bvhar::LdltInits ldlt_inits(param_init[i]); + sur_objs[i].reset(new bvhar::MinnReg(minn_params, ldlt_inits, static_cast(seed_chain[i]))); + } + break; + } + case 2: { + bvhar::SsvsParams ssvs_params( + num_iter, x, y, + param_reg, + grp_id, grp_mat, + param_prior, + param_intercept, + include_mean + ); + for (int i = 0; i < num_chains; i++ ) { + bvhar::SsvsInits ssvs_inits(param_init[i]); + sur_objs[i].reset(new bvhar::SsvsReg(ssvs_params, ssvs_inits, static_cast(seed_chain[i]))); + } + break; + } + case 3: { + bvhar::HorseshoeParams horseshoe_params( + num_iter, x, y, + param_reg, + grp_id, grp_mat, + param_intercept, include_mean + ); + for (int i = 0; i < num_chains; i++ ) { + bvhar::HsInits hs_inits(param_init[i]); + sur_objs[i].reset(new bvhar::HorseshoeReg(horseshoe_params, hs_inits, static_cast(seed_chain[i]))); + } + break; + } + case 4: { + bvhar::HierminnParams minn_params( + num_iter, x, y, + param_reg, + own_id, cross_id, grp_mat, + param_prior, + param_intercept, include_mean + ); + for (int i = 0; i < num_chains; i++ ) { + bvhar::HierminnInits 
minn_inits(param_init[i]); + sur_objs[i].reset(new bvhar::HierminnReg(minn_params, minn_inits, static_cast(seed_chain[i]))); + } + break; + } + case 5: { + bvhar::NgParams ng_params( + num_iter, x, y, + param_reg, + grp_id, grp_mat, + param_prior, + param_intercept, + include_mean + ); + for (int i = 0; i < num_chains; ++i) { + bvhar::NgInits ng_inits(param_init[i]); + sur_objs[i].reset(new bvhar::NgReg(ng_params, ng_inits, static_cast(seed_chain[i]))); + } + break; + } + case 6: { + bvhar::DlParams dl_params( + num_iter, x, y, + param_reg, + grp_id, grp_mat, + param_prior, + param_intercept, + include_mean + ); + for (int i = 0; i < num_chains; ++i) { + bvhar::GlInits dl_inits(param_init[i]); + sur_objs[i].reset(new bvhar::DlReg(dl_params, dl_inits, static_cast(seed_chain[i]))); + } + break; + } + } + } + virtual ~McmcLdlt() = default; + std::vector returnRecords() { + fit(); + return res; + } + +protected: + void runGibbs(int chain) { + bvhar::bvharprogress bar(num_iter, display_progress); + for (int i = 0; i < num_iter; ++i) { + bar.increment(); + sur_objs[chain]->doPosteriorDraws(); + bar.update(); + } + #ifdef _OPENMP + #pragma omp critical + #endif + { + res[chain] = sur_objs[chain]->returnRecords(num_burn, thin); + } + } + void fit() { + if (num_chains == 1) { + runGibbs(0); + } else { + #ifdef _OPENMP + #pragma omp parallel for num_threads(nthreads) + #endif + for (int chain = 0; chain < num_chains; chain++) { + runGibbs(chain); + } + } + } + +private: + int num_chains; + int num_iter; + int num_burn; + int thin; + int nthreads; + bool display_progress; + std::vector> sur_objs; + std::vector res; +}; + +PYBIND11_MODULE(_ldlt, m) { + py::class_(m, "McmcLdlt") + .def( + py::init&, int, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, + const Eigen::MatrixXi&, bool, const Eigen::VectorXi&, bool, int>() + ) + .def("returnRecords", &McmcLdlt::returnRecords); +} \ No newline at end of file diff --git 
a/python/src/bvhar/_src/_ldltforecast.cpp b/python/src/bvhar/_src/_ldltforecast.cpp new file mode 100644 index 00000000..14087e3d --- /dev/null +++ b/python/src/bvhar/_src/_ldltforecast.cpp @@ -0,0 +1,590 @@ +#include +#include +#include + +class LdltForecast { +public: + LdltForecast( + int num_chains, int lag, int step, const Eigen::MatrixXd& y, + bool sparse, py::dict& fit_record, + const Eigen::VectorXi& seed_chain, bool include_mean, int nthreads + ) + : num_chains(num_chains), nthreads(nthreads), + forecaster(num_chains), density_forecast(num_chains) { + std::unique_ptr reg_record; + py::str alpha_name = sparse ? "alpha_sparse_record" : "alpha_record"; + py::str a_name = sparse ? "a_sparse_record" : "a_record"; + for (int i = 0; i < num_chains; ++i) { + py::list alpha_list = fit_record[alpha_name]; + // py::list alpha_list = fit_record[alpha_name].cast(); + py::list a_list = fit_record[a_name]; + py::list d_list = fit_record["d_record"]; + if (include_mean) { + py::list c_list = fit_record["c_record"]; + reg_record.reset(new bvhar::LdltRecords( + py::cast(alpha_list[i]), + py::cast(c_list[i]), + py::cast(a_list[i]), + py::cast(d_list[i]) + )); + } else { + reg_record.reset(new bvhar::LdltRecords( + py::cast(alpha_list[i]), + py::cast(a_list[i]), + py::cast(d_list[i]) + )); + } + forecaster[i].reset(new bvhar::RegVarForecaster( + *reg_record, step, y, lag, include_mean, static_cast(seed_chain[i]) + )); + } + } + LdltForecast( + int num_chains, int week, int month, int step, const Eigen::MatrixXd& y, + bool sparse, py::dict& fit_record, + const Eigen::VectorXi& seed_chain, bool include_mean, int nthreads + ) + : num_chains(num_chains), nthreads(nthreads), + forecaster(num_chains), density_forecast(num_chains) { + std::unique_ptr reg_record; + py::str alpha_name = sparse ? "alpha_sparse_record" : "alpha_record"; + py::str a_name = sparse ? 
"a_sparse_record" : "a_record"; + for (int i = 0; i < num_chains; ++i) { + py::list alpha_list = fit_record[alpha_name]; + // py::list alpha_list = fit_record[alpha_name].cast(); + py::list a_list = fit_record[a_name]; + py::list d_list = fit_record["d_record"]; + if (include_mean) { + py::list c_list = fit_record["c_record"]; + reg_record.reset(new bvhar::LdltRecords( + py::cast(alpha_list[i]), + py::cast(c_list[i]), + py::cast(a_list[i]), + py::cast(d_list[i]) + )); + } else { + reg_record.reset(new bvhar::LdltRecords( + py::cast(alpha_list[i]), + py::cast(a_list[i]), + py::cast(d_list[i]) + )); + } + Eigen::MatrixXd har_trans = bvhar::build_vhar(y.cols(), week, month, include_mean); + forecaster[i].reset(new bvhar::RegVharForecaster( + *reg_record, step, y, har_trans, month, include_mean, static_cast(seed_chain[i]) + )); + } + } + virtual ~LdltForecast() = default; + std::vector returnForecast() { + forecast(); + return density_forecast; + } + +protected: + void forecast() { + #ifdef _OPENMP + #pragma omp parallel for num_threads(nthreads) + #endif + for (int chain = 0; chain < num_chains; ++chain) { + density_forecast[chain] = forecaster[chain]->forecastDensity(); + forecaster[chain].reset(); // free the memory by making nullptr + } + } + +private: + int num_chains; + int nthreads; + std::vector> forecaster; + std::vector density_forecast; +}; + +class LdltOutForecast { +public: + LdltOutForecast( + const Eigen::MatrixXd& y, int lag, int num_chains, int num_iter, int num_burn, int thin, + bool sparse, py::dict& fit_record, + py::dict& param_reg, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, int chunk_size + ) + : num_window(y.rows()), 
dim(y.cols()), num_test(y_test.rows()), num_horizon(num_test - step + 1), step(step), + lag(lag), include_mean(include_mean), sparse(sparse), + num_chains(num_chains), num_iter(num_iter), num_burn(num_burn), thin(thin), + nthreads(nthreads), chunk_size(chunk_size), seed_forecast(seed_forecast), + roll_mat(num_horizon), roll_y0(num_horizon), y_test(y_test), + model(num_horizon), forecaster(num_horizon), + out_forecast(num_horizon, std::vector(num_chains)), + lpl_record(Eigen::MatrixXd::Zero(num_horizon, num_chains)) { + for (auto ®_chain : model) { + reg_chain.resize(num_chains); + for (auto &ptr : reg_chain) { + ptr = nullptr; + } + } + for (auto ®_forecast : forecaster) { + reg_forecast.resize(num_chains); + for (auto &ptr : reg_forecast) { + ptr = nullptr; + } + } + } + virtual ~LdltOutForecast() = default; + void initialize( + const Eigen::MatrixXd& y, py::dict& fit_record, + py::dict& param_reg, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + const Eigen::MatrixXi& seed_chain + ) { + initData(y); + initForecaster(fit_record); + switch (prior_type) { + case 1: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::MinnParams minn_params( + num_iter, design, roll_y0[window], + param_reg, param_prior, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; chain++) { + bvhar::LdltInits sv_inits(param_init[chain]); + model[window][chain].reset(new bvhar::MinnReg(minn_params, sv_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 2: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::SsvsParams ssvs_params( + num_iter, design, roll_y0[window], + param_reg, grp_id, grp_mat, + 
param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; chain++) { + bvhar::SsvsInits ssvs_inits(param_init[chain]); + model[window][chain].reset(new bvhar::SsvsReg(ssvs_params, ssvs_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 3: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::HorseshoeParams horseshoe_params( + num_iter, design, roll_y0[window], + param_reg, grp_id, grp_mat, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::HsInits hs_inits(param_init[chain]); + model[window][chain].reset(new bvhar::HorseshoeReg(horseshoe_params, hs_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 4: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::HierminnParams minn_params( + num_iter, design, roll_y0[window], + param_reg, + own_id, cross_id, grp_mat, + param_prior, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; chain++) { + bvhar::HierminnInits minn_inits(param_init[chain]); + model[window][chain].reset(new bvhar::HierminnReg(minn_params, minn_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 5: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::NgParams ng_params( + num_iter, design, roll_y0[window], + param_reg, + grp_id, grp_mat, + param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::NgInits ng_inits(param_init[chain]); + model[window][chain].reset(new bvhar::NgReg(ng_params, ng_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // 
free the memory + } + break; + } + case 6: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::DlParams dl_params( + num_iter, design, roll_y0[window], + param_reg, + grp_id, grp_mat, + param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::GlInits dl_inits(param_init[chain]); + model[window][chain].reset(new bvhar::DlReg(dl_params, dl_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + } + } + py::dict returnForecast() { + forecast(); + return py::dict( + py::arg("forecast") = out_forecast, + py::arg("lpl") = lpl_record.mean() + ); + } + +protected: + virtual void initData(const Eigen::MatrixXd& y) = 0; + virtual void initForecaster(py::dict& fit_record) = 0; + virtual Eigen::MatrixXd buildDesign(int window) = 0; + virtual void runGibbs(int window, int chain) = 0; + void forecast() { + if (num_chains == 1) { + #ifdef _OPENMP + #pragma omp parallel for num_threads(nthreads) + #endif + for (int window = 0; window < num_horizon; ++window) { + if (window != 0) { + runGibbs(window, 0); + } + Eigen::VectorXd valid_vec = y_test.row(step); + out_forecast[window][0] = forecaster[window][0]->forecastDensity(valid_vec).bottomRows(1); + lpl_record(window, 0) = forecaster[window][0]->returnLpl(); + forecaster[window][0].reset(); // free the memory by making nullptr + } + } else { + #ifdef _OPENMP + #pragma omp parallel for collapse(2) schedule(static, chunk_size) num_threads(nthreads) + #endif + for (int window = 0; window < num_horizon; ++window) { + for (int chain = 0; chain < num_chains; ++chain) { + if (window != 0) { + runGibbs(window, chain); + } + Eigen::VectorXd valid_vec = y_test.row(step); + out_forecast[window][chain] = forecaster[window][chain]->forecastDensity(valid_vec).bottomRows(1); + lpl_record(window, chain) = forecaster[window][chain]->returnLpl(); + 
forecaster[window][chain].reset(); // free the memory by making nullptr + } + } + } + } + int num_window, dim, num_test, num_horizon, step; + int lag; + bool include_mean, sparse; + int num_chains, num_iter, num_burn, thin, nthreads, chunk_size; + Eigen::VectorXi seed_forecast; + std::vector roll_mat, roll_y0; + Eigen::MatrixXd y_test; + std::vector>> model; + std::vector>> forecaster; + std::vector> out_forecast; + Eigen::MatrixXd lpl_record; +}; + +class LdltRoll : public LdltOutForecast { +public: + LdltRoll( + const Eigen::MatrixXd& y, int lag, int num_chains, int num_iter, int num_burn, int thin, + bool sparse, py::dict& fit_record, + py::dict& param_reg, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, int chunk_size + ) + : LdltOutForecast( + y, lag, num_chains, num_iter, num_burn, thin, sparse, fit_record, + param_reg, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, + include_mean, step, y_test, + seed_chain, seed_forecast, nthreads, chunk_size + ) {} + virtual ~LdltRoll() = default; + +protected: + void initData(const Eigen::MatrixXd& y) override { + Eigen::MatrixXd tot_mat(num_window + num_test, dim); + tot_mat << y, + y_test; + for (int i = 0; i < num_horizon; ++i) { + roll_mat[i] = tot_mat.middleRows(i, num_window); + roll_y0[i] = bvhar::build_y0(roll_mat[i], lag, lag + 1); + } + tot_mat.resize(0, 0); // free the memory + } +}; + +class LdltExpand : public LdltOutForecast { +public: + LdltExpand( + const Eigen::MatrixXd& y, int lag, int num_chains, int num_iter, int num_burn, int thin, + bool sparse, py::dict& fit_record, + py::dict& param_reg, py::dict& param_prior, py::dict& param_intercept, 
std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, int chunk_size + ) + : LdltOutForecast( + y, lag, num_chains, num_iter, num_burn, thin, sparse, fit_record, + param_reg, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, + include_mean, step, y_test, + seed_chain, seed_forecast, nthreads, chunk_size + ) {} + virtual ~LdltExpand() = default; + +protected: + void initData(const Eigen::MatrixXd& y) override { + Eigen::MatrixXd tot_mat(num_window + num_test, dim); + tot_mat << y, + y_test; + for (int i = 0; i < num_horizon; ++i) { + roll_mat[i] = tot_mat.topRows(num_window + i); + roll_y0[i] = bvhar::build_y0(roll_mat[i], lag, lag + 1); + } + tot_mat.resize(0, 0); // free the memory + } +}; + +template +class LdltVarOut : public BaseOutForecast { +public: + LdltVarOut( + const Eigen::MatrixXd& y, int lag, int num_chains, int num_iter, int num_burn, int thin, + bool sparse, py::dict& fit_record, + py::dict& param_reg, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, int chunk_size + ) + : BaseOutForecast( + y, lag, num_chains, num_iter, num_burn, thin, sparse, fit_record, + param_reg, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, + include_mean, step, y_test, + seed_chain, seed_forecast, nthreads, chunk_size + ) { + initialize( + y, fit_record, param_reg, param_prior, param_intercept, param_init, 
prior_type, + grp_id, own_id, cross_id, grp_mat, seed_chain + ); + } + virtual ~LdltVarOut() = default; + +protected: + void initForecaster(py::dict& fit_record) override { + std::unique_ptr record; + py::str alpha_name = sparse ? "alpha_sparse_record" : "alpha_record"; + py::str a_name = sparse ? "a_sparse_record" : "a_record"; + py::list d_list = fit_record["d_record"]; + py::list alpha_list = fit_record[alpha_name]; + py::list a_list = fit_record[a_name]; + for (int i = 0; i < num_chains; ++i) { + if (include_mean) { + py::list c_list = fit_record["c_record"]; + record.reset(new bvhar::LdltRecords( + py::cast(alpha_list[i]), + py::cast(c_list[i]), + py::cast(a_list[i]), + py::cast(d_list[i]) + )); + } else { + record.reset(new bvhar::LdltRecords( + py::cast(alpha_list[i]), + py::cast(a_list[i]), + py::cast(d_list[i]) + )); + } + forecaster[0][i].reset(new bvhar::RegVarForecaster( + *record, step, roll_y0[0], lag, include_mean, static_cast(seed_forecast[i]) + )); + } + } + Eigen::MatrixXd buildDesign(int window) override { + return bvhar::build_x0(roll_mat[window], lag, include_mean); + } + void runGibbs(int window, int chain) override { + for (int i = 0; i < num_iter; ++i) { + model[window][chain]->doPosteriorDraws(); + } + bvhar::LdltRecords record = model[window][chain]->returnLdltRecords(num_burn, thin, sparse); + forecaster[window][chain].reset(new bvhar::RegVarForecaster( + record, step, roll_y0[window], lag, include_mean, static_cast(seed_forecast[chain]) + )); + model[window][chain].reset(); // free the memory by making nullptr + } + using BaseOutForecast::initialize; + using BaseOutForecast::dim; + using BaseOutForecast::step; + using BaseOutForecast::lag; + using BaseOutForecast::include_mean; + using BaseOutForecast::sparse; + using BaseOutForecast::num_chains; + using BaseOutForecast::num_iter; + using BaseOutForecast::num_burn; + using BaseOutForecast::thin; + using BaseOutForecast::seed_forecast; + using BaseOutForecast::roll_mat; + using 
BaseOutForecast::roll_y0; + using BaseOutForecast::model; + using BaseOutForecast::forecaster; + using BaseOutForecast::out_forecast; + using BaseOutForecast::lpl_record; +}; + +template +class LdltVharOut : public BaseOutForecast { +public: + LdltVharOut( + const Eigen::MatrixXd& y, int week, int month, int num_chains, int num_iter, int num_burn, int thin, + bool sparse, py::dict& fit_record, + py::dict& param_reg, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, int chunk_size + ) + : BaseOutForecast( + y, month, num_chains, num_iter, num_burn, thin, sparse, fit_record, + param_reg, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, + include_mean, step, y_test, + seed_chain, seed_forecast, nthreads, chunk_size + ), + har_trans(bvhar::build_vhar(dim, week, month, include_mean)) { + initialize( + y, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, seed_chain + ); + } + virtual ~LdltVharOut() = default; + +protected: + void initForecaster(py::dict& fit_record) override { + std::unique_ptr record; + py::str alpha_name = sparse ? "alpha_sparse_record" : "alpha_record"; + py::str a_name = sparse ? 
"a_sparse_record" : "a_record"; + py::list d_list = fit_record["d_record"]; + py::list alpha_list = fit_record[alpha_name]; + py::list a_list = fit_record[a_name]; + for (int i = 0; i < num_chains; ++i) { + if (include_mean) { + py::list c_list = fit_record["c_record"]; + record.reset(new bvhar::LdltRecords( + py::cast(alpha_list[i]), + py::cast(c_list[i]), + py::cast(a_list[i]), + py::cast(d_list[i]) + )); + } else { + record.reset(new bvhar::LdltRecords( + py::cast(alpha_list[i]), + py::cast(a_list[i]), + py::cast(d_list[i]) + )); + } + forecaster[0][i].reset(new bvhar::RegVharForecaster( + *record, step, roll_y0[0], har_trans, lag, include_mean, static_cast(seed_forecast[i]) + )); + } + } + Eigen::MatrixXd buildDesign(int window) override { + return bvhar::build_x0(roll_mat[window], lag, include_mean) * har_trans.transpose(); + } + void runGibbs(int window, int chain) override { + for (int i = 0; i < num_iter; ++i) { + model[window][chain]->doPosteriorDraws(); + } + bvhar::LdltRecords record = model[window][chain]->returnLdltRecords(num_burn, thin, sparse); + forecaster[window][chain].reset(new bvhar::RegVharForecaster( + record, step, roll_y0[window], har_trans, lag, include_mean, static_cast(seed_forecast[chain]) + )); + model[window][chain].reset(); // free the memory by making nullptr + } + using BaseOutForecast::initialize; + using BaseOutForecast::dim; + using BaseOutForecast::step; + using BaseOutForecast::lag; + using BaseOutForecast::include_mean; + using BaseOutForecast::sparse; + using BaseOutForecast::num_chains; + using BaseOutForecast::num_iter; + using BaseOutForecast::num_burn; + using BaseOutForecast::thin; + using BaseOutForecast::seed_forecast; + using BaseOutForecast::roll_mat; + using BaseOutForecast::roll_y0; + using BaseOutForecast::model; + using BaseOutForecast::forecaster; + using BaseOutForecast::out_forecast; + using BaseOutForecast::lpl_record; + +private: + Eigen::MatrixXd har_trans; +}; + +PYBIND11_MODULE(_ldltforecast, m) { + 
py::class_(m, "LdltForecast") + .def(py::init()) + .def(py::init()) + .def("returnForecast", &LdltForecast::returnForecast); + + py::class_>(m, "LdltVarRoll") + .def( + py::init&, int, + const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::MatrixXi&, + bool, int, const Eigen::MatrixXd&, + const Eigen::MatrixXi&, const Eigen::VectorXi&, int, int>() + ) + .def("returnForecast", &LdltVarOut::returnForecast); + + py::class_>(m, "LdltVarExpand") + .def( + py::init&, int, + const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::MatrixXi&, + bool, int, const Eigen::MatrixXd&, + const Eigen::MatrixXi&, const Eigen::VectorXi&, int, int>() + ) + .def("returnForecast", &LdltVarOut::returnForecast); + + py::class_>(m, "LdltVharRoll") + .def( + py::init&, int, + const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::MatrixXi&, + bool, int, const Eigen::MatrixXd&, + const Eigen::MatrixXi&, const Eigen::VectorXi&, int, int>() + ) + .def("returnForecast", &LdltVharOut::returnForecast); + + py::class_>(m, "LdltVharExpand") + .def( + py::init&, int, + const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::MatrixXi&, + bool, int, const Eigen::MatrixXd&, + const Eigen::MatrixXi&, const Eigen::VectorXi&, int, int>() + ) + .def("returnForecast", &LdltVharOut::returnForecast); +} \ No newline at end of file diff --git a/python/src/bvhar/_src/_ols.cpp b/python/src/bvhar/_src/_ols.cpp new file mode 100644 index 00000000..005b285d --- /dev/null +++ b/python/src/bvhar/_src/_ols.cpp @@ -0,0 +1,30 @@ +#include +#include + +PYBIND11_MODULE(_ols, m) { + m.doc() = "OLS for VAR and VHAR"; + + py::class_(m, "MultiOls") + .def(py::init()) + .def("returnOlsRes", &bvhar::MultiOls::returnOlsRes); + + py::class_(m, "LltOls") + .def(py::init()); + + py::class_(m, "QrOls") + .def(py::init()); + + py::class_(m, "OlsVar") + .def( + py::init(), + py::arg("y"), py::arg("lag") = 1, 
py::arg("include_mean") = true, py::arg("method") = 1 + ) + .def("returnOlsRes", &bvhar::OlsVar::returnOlsRes); + + py::class_(m, "OlsVhar") + .def( + py::init(), + py::arg("y"), py::arg("week") = 5, py::arg("month") = 22, py::arg("include_mean") = true, py::arg("method") = 1 + ) + .def("returnOlsRes", &bvhar::OlsVhar::returnOlsRes); +} diff --git a/python/src/bvhar/_src/_sv.cpp b/python/src/bvhar/_src/_sv.cpp new file mode 100644 index 00000000..feb70baf --- /dev/null +++ b/python/src/bvhar/_src/_sv.cpp @@ -0,0 +1,160 @@ +#include +#include +#include + +class SvMcmc { +public: + SvMcmc( + int num_chains, int num_iter, int num_burn, int thin, + const Eigen::MatrixXd& x, const Eigen::MatrixXd& y, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, + std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, + const Eigen::MatrixXi& grp_mat, + bool include_mean, const Eigen::VectorXi& seed_chain, + bool display_progress, int nthreads + ) + : num_chains(num_chains), num_iter(num_iter), num_burn(num_burn), thin(thin), nthreads(nthreads), + display_progress(display_progress), sv_objs(num_chains), res(num_chains) { + switch (prior_type) { + case 1: { + bvhar::MinnSvParams minn_params( + num_iter, x, y, + param_sv, param_prior, + param_intercept, include_mean + ); + for (int i = 0; i < num_chains; i++ ) { + bvhar::SvInits sv_inits(param_init[i]); + sv_objs[i].reset(new bvhar::MinnSv(minn_params, sv_inits, static_cast(seed_chain[i]))); + } + break; + } + case 2: { + bvhar::SsvsSvParams ssvs_params( + num_iter, x, y, + param_sv, + grp_id, grp_mat, + param_prior, + param_intercept, + include_mean + ); + for (int i = 0; i < num_chains; i++ ) { + bvhar::SsvsSvInits ssvs_inits(param_init[i]); + sv_objs[i].reset(new bvhar::SsvsSv(ssvs_params, ssvs_inits, static_cast(seed_chain[i]))); + } + break; + } + case 3: { + bvhar::HsSvParams horseshoe_params( + num_iter, x, y, + param_sv, + 
grp_id, grp_mat, + param_intercept, include_mean + ); + for (int i = 0; i < num_chains; i++ ) { + bvhar::HsSvInits hs_inits(param_init[i]); + sv_objs[i].reset(new bvhar::HorseshoeSv(horseshoe_params, hs_inits, static_cast(seed_chain[i]))); + } + break; + } + case 4: { + bvhar::HierminnSvParams minn_params( + num_iter, x, y, + param_sv, + own_id, cross_id, grp_mat, + param_prior, + param_intercept, include_mean + ); + for (int i = 0; i < num_chains; i++ ) { + bvhar::HierminnSvInits minn_inits(param_init[i]); + sv_objs[i].reset(new bvhar::HierminnSv(minn_params, minn_inits, static_cast(seed_chain[i]))); + } + break; + } + case 5: { + bvhar::NgSvParams ng_params( + num_iter, x, y, + param_sv, + grp_id, grp_mat, + param_prior, + param_intercept, + include_mean + ); + for (int i = 0; i < num_chains; ++i) { + bvhar::NgSvInits ng_inits(param_init[i]); + sv_objs[i].reset(new bvhar::NormalgammaSv(ng_params, ng_inits, static_cast(seed_chain[i]))); + } + break; + } + case 6: { + bvhar::DlSvParams dl_params( + num_iter, x, y, + param_sv, + grp_id, grp_mat, + param_prior, + param_intercept, + include_mean + ); + for (int i = 0; i < num_chains; ++i) { + bvhar::GlSvInits dl_inits(param_init[i]); + sv_objs[i].reset(new bvhar::DirLaplaceSv(dl_params, dl_inits, static_cast(seed_chain[i]))); + } + break; + } + } + } + virtual ~SvMcmc() = default; + std::vector returnRecords() { + fit(); + return res; + } + +protected: + void runGibbs(int chain) { + bvhar::bvharprogress bar(num_iter, display_progress); + for (int i = 0; i < num_iter; ++i) { + bar.increment(); + sv_objs[chain]->doPosteriorDraws(); + bar.update(); + } + #ifdef _OPENMP + #pragma omp critical + #endif + { + res[chain] = sv_objs[chain]->returnRecords(num_burn, thin); + } + } + void fit() { + if (num_chains == 1) { + runGibbs(0); + } else { + #ifdef _OPENMP + #pragma omp parallel for num_threads(nthreads) + #endif + for (int chain = 0; chain < num_chains; chain++) { + runGibbs(chain); + } + } + } + +private: + int 
num_chains; + int num_iter; + int num_burn; + int thin; + int nthreads; + bool display_progress; + std::vector> sv_objs; + std::vector res; +}; + +PYBIND11_MODULE(_sv, m) { + py::class_(m, "SvMcmc") + .def( + py::init&, int, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, + const Eigen::MatrixXi&, bool, const Eigen::VectorXi&, bool, int>() + ) + .def("returnRecords", &SvMcmc::returnRecords); +} \ No newline at end of file diff --git a/python/src/bvhar/_src/_svforecast.cpp b/python/src/bvhar/_src/_svforecast.cpp new file mode 100644 index 00000000..018dedfc --- /dev/null +++ b/python/src/bvhar/_src/_svforecast.cpp @@ -0,0 +1,721 @@ +#include +#include +#include + +class SvForecast{ +public: + SvForecast( + int num_chains, int lag, int step, const Eigen::MatrixXd& y, + bool sv, bool sparse, py::dict& fit_record, + Eigen::VectorXi seed_chain, bool include_mean, int nthreads + ) + : num_chains(num_chains), nthreads(nthreads), sv(sv), + forecaster(num_chains), density_forecast(num_chains) { + std::unique_ptr sv_record; + py::str alpha_name = sparse ? "alpha_sparse_record" : "alpha_record"; + py::str a_name = sparse ? 
"a_sparse_record" : "a_record"; + for (int i = 0; i < num_chains; ++i) { + py::list alpha_list = fit_record[alpha_name]; + py::list a_list = fit_record[a_name]; + py::list h_list = fit_record["h_record"]; + py::list sigh_list = fit_record["sigh_record"]; + if (include_mean) { + py::list c_list = fit_record["c_record"]; + sv_record.reset(new bvhar::SvRecords( + py::cast(alpha_list[i]), + py::cast(c_list[i]), + py::cast(h_list[i]), + py::cast(a_list[i]), + py::cast(sigh_list[i]) + )); + } else { + sv_record.reset(new bvhar::SvRecords( + py::cast(alpha_list[i]), + py::cast(h_list[i]), + py::cast(a_list[i]), + py::cast(sigh_list[i]) + )); + } + forecaster[i].reset(new bvhar::SvVarForecaster( + *sv_record, step, y, lag, include_mean, static_cast(seed_chain[i]) + )); + } + } + SvForecast( + int num_chains, int week, int month, int step, const Eigen::MatrixXd& y, + bool sv, bool sparse, py::dict& fit_record, + Eigen::VectorXi seed_chain, bool include_mean, int nthreads + ) + : num_chains(num_chains), nthreads(nthreads), sv(sv), + forecaster(num_chains), density_forecast(num_chains) { + std::unique_ptr sv_record; + py::str alpha_name = sparse ? "alpha_sparse_record" : "alpha_record"; + py::str a_name = sparse ? 
"a_sparse_record" : "a_record"; + for (int i = 0; i < num_chains; ++i) { + py::list alpha_list = fit_record[alpha_name]; + py::list a_list = fit_record[a_name]; + py::list h_list = fit_record["h_record"]; + py::list sigh_list = fit_record["sigh_record"]; + if (include_mean) { + py::list c_list = fit_record["c_record"]; + sv_record.reset(new bvhar::SvRecords( + py::cast(alpha_list[i]), + py::cast(c_list[i]), + py::cast(h_list[i]), + py::cast(a_list[i]), + py::cast(sigh_list[i]) + )); + } else { + sv_record.reset(new bvhar::SvRecords( + py::cast(alpha_list[i]), + py::cast(h_list[i]), + py::cast(a_list[i]), + py::cast(sigh_list[i]) + )); + } + Eigen::MatrixXd har_trans = bvhar::build_vhar(y.cols(), week, month, include_mean); + forecaster[i].reset(new bvhar::SvVharForecaster( + *sv_record, step, y, har_trans, month, include_mean, static_cast(seed_chain[i]) + )); + } + } + virtual ~SvForecast() = default; + std::vector returnForecast() { + forecast(); + return density_forecast; + } + +protected: + void forecast() { + #ifdef _OPENMP + #pragma omp parallel for num_threads(nthreads) + #endif + for (int chain = 0; chain < num_chains; ++chain) { + density_forecast[chain] = forecaster[chain]->forecastDensity(sv); + forecaster[chain].reset(); // free the memory by making nullptr + } + } + +private: + int num_chains; + int nthreads; + bool sv; + std::vector> forecaster; + std::vector density_forecast; +}; + +class SvOutForecast { +public: + SvOutForecast( + const Eigen::MatrixXd& y, int lag, int num_chains, int num_iter, int num_burn, int thin, + bool sv, bool sparse, py::dict& fit_record, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, 
int chunk_size + ) + : num_window(y.rows()), dim(y.cols()), num_test(y_test.rows()), num_horizon(num_test - step + 1), step(step), + lag(lag), include_mean(include_mean), sv(sv), sparse(sparse), + num_chains(num_chains), num_iter(num_iter), num_burn(num_burn), thin(thin), + nthreads(nthreads), chunk_size(chunk_size), seed_forecast(seed_forecast), + roll_mat(num_horizon), roll_y0(num_horizon), y_test(y_test), + model(num_horizon), forecaster(num_horizon), + out_forecast(num_horizon, std::vector(num_chains)), + lpl_record(Eigen::MatrixXd::Zero(num_horizon, num_chains)) { + for (auto ®_chain : model) { + reg_chain.resize(num_chains); + for (auto &ptr : reg_chain) { + ptr = nullptr; + } + } + for (auto ®_forecast : forecaster) { + reg_forecast.resize(num_chains); + for (auto &ptr : reg_forecast) { + ptr = nullptr; + } + } + } + virtual ~SvOutForecast() = default; + virtual void initialize( + const Eigen::MatrixXd& y, py::dict& fit_record, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + const Eigen::MatrixXi& seed_chain + ) = 0; + py::dict returnForecast() { + forecast(); + return py::dict( + py::arg("forecast") = out_forecast, + py::arg("lpl") = lpl_record.mean() + ); + } + +protected: + virtual void initData(const Eigen::MatrixXd& y) = 0; + virtual void initForecaster(py::dict& fit_record) = 0; + virtual Eigen::MatrixXd buildDesign(int window) = 0; + virtual void runGibbs(int window, int chain) = 0; + void forecast() { + if (num_chains == 1) { + #ifdef _OPENMP + #pragma omp parallel for num_threads(nthreads) + #endif + for (int window = 0; window < num_horizon; ++window) { + if (window != 0) { + runGibbs(window, 0); + } + Eigen::VectorXd valid_vec = y_test.row(step); + out_forecast[window][0] = forecaster[window][0]->forecastDensity(valid_vec, sv).bottomRows(1); + 
lpl_record(window, 0) = forecaster[window][0]->returnLpl(); + forecaster[window][0].reset(); // free the memory by making nullptr + } + } else { + #ifdef _OPENMP + #pragma omp parallel for collapse(2) schedule(static, chunk_size) num_threads(nthreads) + #endif + for (int window = 0; window < num_horizon; ++window) { + for (int chain = 0; chain < num_chains; ++chain) { + if (window != 0) { + runGibbs(window, chain); + } + Eigen::VectorXd valid_vec = y_test.row(step); + out_forecast[window][chain] = forecaster[window][chain]->forecastDensity(valid_vec, sv).bottomRows(1); + lpl_record(window, chain) = forecaster[window][chain]->returnLpl(); + forecaster[window][chain].reset(); // free the memory by making nullptr + } + } + } + } + int num_window, dim, num_test, num_horizon, step; + int lag; + bool include_mean, sv, sparse; + int num_chains, num_iter, num_burn, thin, nthreads, chunk_size; + Eigen::VectorXi seed_forecast; + std::vector roll_mat, roll_y0; + Eigen::MatrixXd y_test; + std::vector>> model; + std::vector>> forecaster; + std::vector> out_forecast; + Eigen::MatrixXd lpl_record; +}; + +class SvRoll : public SvOutForecast { +public: + SvRoll( + const Eigen::MatrixXd& y, int lag, int num_chains, int num_iter, int num_burn, int thin, + bool sv, bool sparse, py::dict& fit_record, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, int chunk_size + ) + : SvOutForecast( + y, lag, num_chains, num_iter, num_burn, thin, sv, sparse, fit_record, + param_sv, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, + include_mean, step, y_test, + seed_chain, seed_forecast, nthreads, chunk_size + ) {} + 
virtual ~SvRoll() = default; + void initialize( + const Eigen::MatrixXd& y, py::dict& fit_record, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + const Eigen::MatrixXi& seed_chain + ) override { + initData(y); + initForecaster(fit_record); + switch (prior_type) { + case 1: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::MinnSvParams minn_params( + num_iter, design, roll_y0[window], + param_sv, param_prior, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; chain++) { + bvhar::SvInits sv_inits(param_init[chain]); + model[window][chain].reset(new bvhar::MinnSv(minn_params, sv_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 2: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::SsvsSvParams ssvs_params( + num_iter, design, roll_y0[window], + param_sv, grp_id, grp_mat, + param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; chain++) { + bvhar::SsvsSvInits ssvs_inits(param_init[chain]); + model[window][chain].reset(new bvhar::SsvsSv(ssvs_params, ssvs_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 3: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::HsSvParams horseshoe_params( + num_iter, design, roll_y0[window], + param_sv, grp_id, grp_mat, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::HsSvInits hs_inits(param_init[chain]); + model[window][chain].reset(new bvhar::HorseshoeSv(horseshoe_params, hs_inits, 
static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 4: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::HierminnSvParams minn_params( + num_iter, design, roll_y0[window], + param_sv, + own_id, cross_id, grp_mat, + param_prior, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; chain++) { + bvhar::HierminnSvInits minn_inits(param_init[chain]); + model[window][chain].reset(new bvhar::HierminnSv(minn_params, minn_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 5: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::NgSvParams ng_params( + num_iter, design, roll_y0[window], + param_sv, + grp_id, grp_mat, + param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::NgSvInits ng_inits(param_init[chain]); + model[window][chain].reset(new bvhar::NormalgammaSv(ng_params, ng_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 6: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::DlSvParams dl_params( + num_iter, design, roll_y0[window], + param_sv, + grp_id, grp_mat, + param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::GlSvInits dl_inits(param_init[chain]); + model[window][chain].reset(new bvhar::DirLaplaceSv(dl_params, dl_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + } + } + +protected: + void initData(const Eigen::MatrixXd& y) override { + Eigen::MatrixXd tot_mat(num_window + num_test, dim); + tot_mat << y, + y_test; + for (int i = 0; i < 
num_horizon; ++i) { + roll_mat[i] = tot_mat.middleRows(i, num_window); + roll_y0[i] = bvhar::build_y0(roll_mat[i], lag, lag + 1); + } + tot_mat.resize(0, 0); // free the memory + } +}; + +class SvExpand : public SvOutForecast { +public: + SvExpand( + const Eigen::MatrixXd& y, int lag, int num_chains, int num_iter, int num_burn, int thin, + bool sv, bool sparse, py::dict& fit_record, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, int chunk_size + ) + : SvOutForecast( + y, lag, num_chains, num_iter, num_burn, thin, sv, sparse, fit_record, + param_sv, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, + include_mean, step, y_test, + seed_chain, seed_forecast, nthreads, chunk_size + ) {} + virtual ~SvExpand() = default; + void initialize( + const Eigen::MatrixXd& y, py::dict& fit_record, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + const Eigen::MatrixXi& seed_chain + ) override { + initData(y); + initForecaster(fit_record); + switch (prior_type) { + case 1: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::MinnSvParams minn_params( + num_iter, design, roll_y0[window], + param_sv, param_prior, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::SvInits sv_inits(param_init[chain], roll_y0[window].rows()); + model[window][chain].reset(new bvhar::MinnSv(minn_params, sv_inits, 
static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 2: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::SsvsSvParams ssvs_params( + num_iter, design, roll_y0[window], + param_sv, grp_id, grp_mat, + param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; chain++) { + bvhar::SsvsSvInits ssvs_inits(param_init[chain], roll_y0[window].rows()); + model[window][chain].reset(new bvhar::SsvsSv(ssvs_params, ssvs_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 3: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::HsSvParams horseshoe_params( + num_iter, design, roll_y0[window], + param_sv, grp_id, grp_mat, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::HsSvInits hs_inits(param_init[chain], roll_y0[window].rows()); + model[window][chain].reset(new bvhar::HorseshoeSv(horseshoe_params, hs_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 4: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::HierminnSvParams minn_params( + num_iter, design, roll_y0[window], + param_sv, + own_id, cross_id, grp_mat, + param_prior, + param_intercept, include_mean + ); + for (int chain = 0; chain < num_chains; chain++) { + bvhar::HierminnSvInits minn_inits(param_init[chain]); + model[window][chain].reset(new bvhar::HierminnSv(minn_params, minn_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 5: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + 
bvhar::NgSvParams ng_params( + num_iter, design, roll_y0[window], + param_sv, + grp_id, grp_mat, + param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::NgSvInits ng_inits(param_init[chain], roll_y0[window].rows()); + model[window][chain].reset(new bvhar::NormalgammaSv(ng_params, ng_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + case 6: { + for (int window = 0; window < num_horizon; ++window) { + Eigen::MatrixXd design = buildDesign(window); + bvhar::DlSvParams dl_params( + num_iter, design, roll_y0[window], + param_sv, + grp_id, grp_mat, + param_prior, param_intercept, + include_mean + ); + for (int chain = 0; chain < num_chains; ++chain) { + bvhar::GlSvInits dl_inits(param_init[chain], roll_y0[window].rows()); + model[window][chain].reset(new bvhar::DirLaplaceSv(dl_params, dl_inits, static_cast(seed_chain(window, chain)))); + } + roll_mat[window].resize(0, 0); // free the memory + } + break; + } + } + } + +protected: + void initData(const Eigen::MatrixXd& y) override { + Eigen::MatrixXd tot_mat(num_window + num_test, dim); + tot_mat << y, + y_test; + for (int i = 0; i < num_horizon; ++i) { + roll_mat[i] = tot_mat.topRows(num_window + i); + roll_y0[i] = bvhar::build_y0(roll_mat[i], lag, lag + 1); + } + tot_mat.resize(0, 0); // free the memory + } +}; + +template +class SvVarOut : public BaseOutForecast { +public: + SvVarOut( + const Eigen::MatrixXd& y, int lag, int num_chains, int num_iter, int num_burn, int thin, + bool sv, bool sparse, py::dict& fit_record, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, 
int nthreads, int chunk_size + ) + : BaseOutForecast( + y, lag, num_chains, num_iter, num_burn, thin, sv, sparse, fit_record, + param_sv, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, + include_mean, step, y_test, + seed_chain, seed_forecast, nthreads, chunk_size + ) { + initialize( + y, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, seed_chain + ); + } + virtual ~SvVarOut() = default; + +protected: + void initForecaster(py::dict& fit_record) override { + std::unique_ptr record; + py::str alpha_name = sparse ? "alpha_sparse_record" : "alpha_record"; + py::str a_name = sparse ? "a_sparse_record" : "a_record"; + py::list h_list = fit_record["h_record"]; + py::list sigh_list = fit_record["sigh_record"]; + py::list alpha_list = fit_record[alpha_name]; + py::list a_list = fit_record[a_name]; + for (int i = 0; i < num_chains; ++i) { + if (include_mean) { + py::list c_list = fit_record["c_record"]; + record.reset(new bvhar::SvRecords( + py::cast(alpha_list[i]), + py::cast(c_list[i]), + py::cast(h_list[i]), + py::cast(a_list[i]), + py::cast(sigh_list[i]) + )); + } else { + record.reset(new bvhar::SvRecords( + py::cast(alpha_list[i]), + py::cast(h_list[i]), + py::cast(a_list[i]), + py::cast(sigh_list[i]) + )); + } + forecaster[0][i].reset(new bvhar::SvVarForecaster( + *record, step, roll_y0[0], lag, include_mean, static_cast(seed_forecast[i]) + )); + } + } + Eigen::MatrixXd buildDesign(int window) override { + return bvhar::build_x0(roll_mat[window], lag, include_mean); + } + void runGibbs(int window, int chain) override { + for (int i = 0; i < num_iter; ++i) { + model[window][chain]->doPosteriorDraws(); + } + bvhar::SvRecords record = model[window][chain]->returnSvRecords(num_burn, thin, sparse); + forecaster[window][chain].reset(new bvhar::SvVarForecaster( + record, step, roll_y0[window], lag, include_mean, static_cast(seed_forecast[chain]) + )); + 
model[window][chain].reset(); // free the memory by making nullptr + } + using BaseOutForecast::initialize; + using BaseOutForecast::dim; + using BaseOutForecast::step; + using BaseOutForecast::lag; + using BaseOutForecast::include_mean; + using BaseOutForecast::sparse; + using BaseOutForecast::num_chains; + using BaseOutForecast::num_iter; + using BaseOutForecast::num_burn; + using BaseOutForecast::thin; + using BaseOutForecast::seed_forecast; + using BaseOutForecast::roll_mat; + using BaseOutForecast::roll_y0; + using BaseOutForecast::model; + using BaseOutForecast::forecaster; + using BaseOutForecast::out_forecast; + using BaseOutForecast::lpl_record; +}; + +template +class SvVharOut : public BaseOutForecast { +public: + SvVharOut( + const Eigen::MatrixXd& y, int week, int month, int num_chains, int num_iter, int num_burn, int thin, + bool sv, bool sparse, py::dict& fit_record, + py::dict& param_sv, py::dict& param_prior, py::dict& param_intercept, std::vector& param_init, int prior_type, + const Eigen::VectorXi& grp_id, const Eigen::VectorXi& own_id, const Eigen::VectorXi& cross_id, const Eigen::MatrixXi& grp_mat, + bool include_mean, int step, const Eigen::MatrixXd& y_test, + const Eigen::MatrixXi& seed_chain, const Eigen::VectorXi& seed_forecast, int nthreads, int chunk_size + ) + : BaseOutForecast( + y, month, num_chains, num_iter, num_burn, thin, sv, sparse, fit_record, + param_sv, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, + include_mean, step, y_test, + seed_chain, seed_forecast, nthreads, chunk_size + ), + har_trans(bvhar::build_vhar(dim, week, month, include_mean)) { + initialize( + y, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, + grp_id, own_id, cross_id, grp_mat, seed_chain + ); + } + virtual ~SvVharOut() = default; + +protected: + void initForecaster(py::dict& fit_record) override { + std::unique_ptr record; + py::str alpha_name = sparse ? 
"alpha_sparse_record" : "alpha_record"; + py::str a_name = sparse ? "a_sparse_record" : "a_record"; + py::list alpha_list = fit_record[alpha_name]; + py::list a_list = fit_record[a_name]; + py::list h_list = fit_record["h_record"]; + py::list sigh_list = fit_record["sigh_record"]; + for (int i = 0; i < num_chains; ++i) { + if (include_mean) { + py::list c_list = fit_record["c_record"]; + record.reset(new bvhar::SvRecords( + py::cast(alpha_list[i]), + py::cast(c_list[i]), + py::cast(h_list[i]), + py::cast(a_list[i]), + py::cast(sigh_list[i]) + )); + } else { + record.reset(new bvhar::SvRecords( + py::cast(alpha_list[i]), + py::cast(h_list[i]), + py::cast(a_list[i]), + py::cast(sigh_list[i]) + )); + } + forecaster[0][i].reset(new bvhar::SvVharForecaster( + *record, step, roll_y0[0], har_trans, lag, include_mean, static_cast(seed_forecast[i]) + )); + } + } + Eigen::MatrixXd buildDesign(int window) override { + return bvhar::build_x0(roll_mat[window], lag, include_mean) * har_trans.transpose(); + } + void runGibbs(int window, int chain) override { + for (int i = 0; i < num_iter; ++i) { + model[window][chain]->doPosteriorDraws(); + } + bvhar::SvRecords record = model[window][chain]->returnSvRecords(num_burn, thin, sparse); + forecaster[window][chain].reset(new bvhar::SvVharForecaster( + record, step, roll_y0[window], har_trans, lag, include_mean, static_cast(seed_forecast[chain]) + )); + model[window][chain].reset(); // free the memory by making nullptr + } + using BaseOutForecast::initialize; + using BaseOutForecast::dim; + using BaseOutForecast::step; + using BaseOutForecast::lag; + using BaseOutForecast::include_mean; + using BaseOutForecast::sparse; + using BaseOutForecast::num_chains; + using BaseOutForecast::num_iter; + using BaseOutForecast::num_burn; + using BaseOutForecast::thin; + using BaseOutForecast::seed_forecast; + using BaseOutForecast::roll_mat; + using BaseOutForecast::roll_y0; + using BaseOutForecast::model; + using BaseOutForecast::forecaster; + 
using BaseOutForecast::out_forecast; + using BaseOutForecast::lpl_record; + +private: + Eigen::MatrixXd har_trans; +}; + +PYBIND11_MODULE(_svforecast, m) { + py::class_(m, "SvForecast") + .def(py::init()) + .def(py::init()) + .def("returnForecast", &SvForecast::returnForecast); + + py::class_>(m, "SvVarRoll") + .def( + py::init&, int, + const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::MatrixXi&, + bool, int, const Eigen::MatrixXd&, + const Eigen::MatrixXi&, const Eigen::VectorXi&, int, int>() + ) + .def("returnForecast", &SvVarOut::returnForecast); + + py::class_>(m, "SvVarExpand") + .def( + py::init&, int, + const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::MatrixXi&, + bool, int, const Eigen::MatrixXd&, + const Eigen::MatrixXi&, const Eigen::VectorXi&, int, int>() + ) + .def("returnForecast", &SvVarOut::returnForecast); + + py::class_>(m, "SvVharRoll") + .def( + py::init&, int, + const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::MatrixXi&, + bool, int, const Eigen::MatrixXd&, + const Eigen::MatrixXi&, const Eigen::VectorXi&, int, int>() + ) + .def("returnForecast", &SvVharOut::returnForecast); + + py::class_>(m, "SvVharExpand") + .def( + py::init&, int, + const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::VectorXi&, const Eigen::MatrixXi&, + bool, int, const Eigen::MatrixXd&, + const Eigen::MatrixXi&, const Eigen::VectorXi&, int, int>() + ) + .def("returnForecast", &SvVharOut::returnForecast); +} \ No newline at end of file diff --git a/python/src/bvhar/datasets/__init__.py b/python/src/bvhar/datasets/__init__.py new file mode 100644 index 00000000..a5817e3a --- /dev/null +++ b/python/src/bvhar/datasets/__init__.py @@ -0,0 +1,17 @@ +import pandas as pd +from importlib.resources import files + +def load_vix(): + """Load and return the CBOE VIX of several ETF datasets + + Returns + ------- + pd.DataFrame + Dataframe for CBOE VIX series + """ + 
_data_path = files('bvhar.datasets.data') / 'etf_vix.csv' + return pd.read_csv(_data_path) + +__all__ = [ + "load_vix" +] \ No newline at end of file diff --git a/python/src/bvhar/datasets/data/etf_vix.csv b/python/src/bvhar/datasets/data/etf_vix.csv new file mode 100644 index 00000000..cfcb3a0d --- /dev/null +++ b/python/src/bvhar/datasets/data/etf_vix.csv @@ -0,0 +1,906 @@ +GVZCLS,OVXCLS,VXFXICLS,VXEEMCLS,VXSLVCLS,EVZCLS,VXXLECLS,VXGDXCLS,VXEWZCLS +21.47,36.52,30.19,30.09,44.47,13.19,27.53,33.51,31.27 +21.51,35.44,28.89,29.49,42.93,12.61,26.64,33.02,30.12 +22.34,35.52,29.06,29.82,43.51,13.12,27.65,33.76,30.18 +21.6,36.59,28.46,30.01,42.83,12.77,27.64,33.5,30.75 +21.2,35.62,29.54,31.13,43.48,13.31,27.8,32.81,32.7 +21.4,34.795,29.105,30.66,43.989999999999995,13.23,27.455,33.955,32.155 +21.6,33.97,28.67,30.19,44.5,13.15,27.11,35.1,31.61 +21.14,32.59,27.96,27.98,42.64,12.79,26.85,33.8,30.47 +20.34,33.51,28.88,27.79,41.04,12.66,25.3,33.04,30.79 +19.58,33.35,27.95,26.07,40.53,12.41,23.94,30.98,29.31 +20.18,33.16,27.68,26.95,41.57,12.57,22.6,30.93,29.69 +20.08,33.23,27.67,26.92,40.74,12.81,22.77,30.61,29.36 +22.24,31.93,27.35,26.91,42.97,13.12,22.95,32.84,28.65 +21.71,31.97,27.91,27.66,43.17,12.72,24.29,32.94,30.52 +20.82,31.57,27.39,27.47,42.06,13.01,24.2,32.11,29.27 +21.23,31.74,29.76,28.94,42.92,13.57,25.66,32.93,30.2 +20.93,32.31,28.8,27.97,42.47,13.48,26.34,33.11,29.63 +21.54,32.18,27.95,26.22,41.75,12.87,25.3,31.18,28.11 +21.93,31.81,27.86,26.24,42.98,12.68,24.85,30.47,28.77 +21.05,30.71,27.58,26.29,40.28,12.55,22.9,32.27,28.4 +21.2,31.2,29.38,27.58,41.22,13.1,23.99,31.57,30.28 +20.45,32.41,29.89,28.19,41.15,12.93,24.02,31.59,29.38 +20.69,32.32,29.46,28.33,39.77,12.45,24.19,31.24,29.86 +20.03,32.43,28.67,27.94,38,11.96,24.55,30.78,29.11 +19.73,32.83,31.73,30.05,37.79,12.54,27.56,32.67,31.6 +19.07,33.3,30.47,28.9,36.12,12.14,25.88,31.9,30.72 +19.24,33.64,30.19,29.69,37.12,12.57,25.82,32.82,32.11 +19.28,34.85,30.51,31.01,36.48,13.25,26.56,33.85,33.42 
+19.21,33.99,29.41,28.81,36.39,13.07,24.98,33.58,32.02 +18.61,33.05,28.59,29.41,34.8,12.67,23.05,31.11,30.66 +19.275,33.864999999999995,28.82,28.89,35.489999999999995,12.245000000000001,23.045,30.13,29.65 +19.94,34.68,29.05,28.37,36.18,11.82,23.04,29.15,28.64 +19.84,33.67,27.23,26.86,36.28,11.73,22.92,29.33,27.79 +19.76,33.21,26.65,25.73,35.95,11.25,21.83,28.74,26.89 +19.01,33.8,25.63,25.14,35.11,11.27,22.21,28.81,26.02 +19.1,34.12,27.25,27.21,36.73,11.71,23.3,29.79,27.86 +20.12,33.52,26.59,26.48,37.79,11.52,23.11,30.74,27.28 +22.9,32.52,26.64,27.1,41.4,11.85,23.49,31.98,28.28 +19.53,33.2,26.41,25.79,39.73,11.6,22.19,29.61,26.97 +18.96,32.91,26.19,25.73,38.95,11.55,22.84,29.48,26.67 +19.4,33.34,27.98,26.38,40.16,11.63,23.09,30.58,28.03 +20.23,34.34,30.76,29.09,41.43,11.93,25.28,33.55,31.35 +19.18,32.93,29.74,28.29,39.79,11.59,24.11,31.68,30.22 +19.26,32.12,28.38,26.7,39.19,11.23,23,30.93,28.29 +19.41,31.42,28.42,27.05,37.41,10.94,21.83,29.53,29.12 +19.21,30,27.12,26.01,37.52,10.34,19.84,29.07,28.55 +20.47,29.45,26.16,24.71,35.76,10.2,17.67,28.67,27.31 +20.6,29.31,28.6,26.93,38.64,10.59,19.22,31.01,29.09 +18.24,29.44,26.5,24.84,34.62,10.19,19.63,29.07,26.86 +18.99,28.68,25.15,23.98,34.62,9.52,17.79,27.47,25.98 +17.83,26.85,26.18,23.92,32.52,9.95,17.43,26.7,26.05 +18.42,27.87,27.29,25.14,35.6,9.97,19.36,28.15,27.1 +18.04,26.93,26.53,24.5,35.28,9.92,19.24,28.07,25.45 +18.51,27.51,28.52,25.96,36.53,10.14,20.96,30.27,26.69 +18.3,28.17,27.42,25.1,34.17,9.84,20.66,29.49,26.35 +19.22,27.89,27.75,24.85,34.01,9.89,20.37,29.26,25.73 +18.62,27.53,27.46,25.63,33.4,9.77,21.12,29.35,26.29 +18.18,28.03,27.19,26.91,33.2,9.81,21.28,30.49,26.98 +17.43,28.94,27.91,26.57,33.58,9.96,21.89,30.58,26.52 +17.01,29,25.9,26.09,32.26,9.69,21.12,29.18,26.5 +17.1,28.46,25.42,25.24,33.01,9.82,20.92,28.87,25.44 +18.05,27.95,25.13,25.51,32.48,9.9,21.16,30.72,25.82 +18.88,28.73,26.25,26.59,35.61,10.6,22.22,33.4,26.85 +18.02,28.03,25.68,26.33,34.09,10.85,22.47,32.18,26.62 
+18.615000000000002,28.935000000000002,26.905,27.145,35.14,10.84,23.485,34.18,27.515 +19.21,29.84,28.13,27.96,36.19,10.83,24.5,36.18,28.41 +19.94,31.62,29.7,30.31,36.19,11.11,26.65,37.07,30.25 +20.02,30.69,28.85,29.28,35.4,10.88,26.37,35.25,29.94 +18.64,29.41,28.2,27.23,33.63,10.16,24.52,32.91,28.14 +18.44,29.61,29.07,28.69,35.93,10.39,26.52,33.1,29.55 +18.8,29.78,29.01,28.67,36.8,10.84,26.52,34.97,29.84 +18.03,28.74,27.38,27.24,34.33,10.42,24.41,33.93,28.89 +17.54,28.36,27.11,27.39,32.71,10.25,24.82,35.01,28.15 +17.33,28.31,26.69,27.35,32.45,10.2,24.51,34.8,27.83 +16.72,27.1,25.64,25.31,31.29,9.88,23.73,33.67,26.66 +17.11,26.91,27.55,27.19,33.73,10.25,24.73,36.06,28.43 +17.64,26.11,27.28,26.24,32.65,10.06,23.94,34.13,27.21 +17.45,25.68,25.67,25.59,33.74,9.72,22.5,33.76,27.07 +16.94,24.62,24.86,24.18,30.87,9.4,21.57,31.06,26.19 +16.61,24.34,24.4,23.52,30.13,9.26,21.38,30.76,25.95 +16.93,25.15,25.5,24.82,30.45,9.64,21.72,32.65,27.21 +16.47,25.12,25.81,24.46,30.01,9.68,20.9,31.77,26.31 +16.55,25.03,25.09,24.43,30.34,10.27,22.06,31.85,25.91 +17.45,27.46,25.22,25.23,32.25,9.89,23.47,33.15,26.34 +16.32,29.79,26.11,26.6,32.06,9.97,26.15,33.78,29.44 +16.38,29.07,26.71,26.68,33.28,10.23,26.43,34.46,29.33 +17.93,29.06,27.06,28.02,34.27,10.62,27.05,36.55,30.49 +19.32,29.27,28.8,29.51,34.83,11.19,27.48,37.53,32.43 +18.25,28.61,28.69,28.53,31.98,10.63,26.65,35.64,31.96 +18.46,28.79,29.23,29.11,33.44,10.36,27.38,35.73,32.77 +20.05,30.96,32.32,31.37,35.03,11.27,29.97,40.09,37.36 +20.75,32.13,33.31,33.09,36.87,11.72,31.23,42.17,39.06 +22.2,31.48,34.72,34.85,40.79,11.99,31.81,43.24,39.43 +22.59,32.62,36.61,36.17,40.94,12.13,33.34,43.94,44.53 +23.29,33.18,36.94,37.42,40.77,12.84,33.39,42.78,43.63 +20.69,30.01,34.28,33.27,38.66,12.09,30.48,39.17,38.33 +20.04,29.75,34.95,33.56,36.09,11.75,29.99,38.95,40.3 +21.79,30.9,34.68,33.75,37.77,13.24,29.98,39.3,39.23 +21.93,30.12,34.93,33.66,38.1,13.43,29.8,39.6,38.3 +21.11,30.19,34.58,33.84,37.14,13.25,29.37,38.4,38.03 
+21.415,30.455,33.980000000000004,32.995000000000005,38.07,13.120000000000001,28.985,38.864999999999995,37.615 +21.72,30.72,33.38,32.15,39,12.99,28.6,39.33,37.2 +23.46,33.32,34.76,33.75,40.11,13.98,30.99,41.6,39.31 +23.11,34.79,34.1,33.96,39.17,13.98,31.8,41.52,37.69 +25.3,41.63,37.4,36.57,40.06,13.97,34.41,41.54,40.11 +25.36,38.58,38.57,36.33,40.24,14.24,33.74,41.42,40.14 +23.99,36.67,36.13,34.09,38.67,14.25,31.86,41.17,38.76 +23.14,35.58,34.33,32.42,37.25,13.56,29.8,40.84,37.18 +22.92,36.9,33.65,32.12,36.93,12.96,28.95,40.77,36.26 +22.03,37.57,34.15,32.21,36.51,13.34,28.71,39.21,36.39 +22.19,42.31,34.72,34.41,34.85,13.59,32.1,39.74,39.28 +22.9,39.45,33.18,32.6,34.59,13.59,30.13,39.04,37.85 +23.96,39.07,34.69,35.17,35.97,13.56,32.05,40.63,40.02 +24.1,36.19,35.4,34.49,37.37,13.93,30.78,39.68,39.8 +22.94,36.28,31.73,31.59,37.57,13.63,27.39,38.44,37.15 +20.64,35.04,30.8,28.52,34.57,12.53,24.96,35.17,34.06 +20.51,32.92,29.09,26.93,34.08,11.78,23.93,34.34,31.87 +19.88,34.66,27.42,25.65,33.21,11.28,23.37,32.96,29.49 +21.07,37.52,28.87,28.75,37.74,11.7,27.11,36.74,32.26 +20.29,35.08,27.72,27.47,35.84,11.36,25.64,36.52,32.32 +20.46,36.84,30.75,29.69,36.17,11.58,27.84,38.02,34.24 +20.47,34.42,29.34,29.22,35.21,11.47,27.05,37,33.56 +19.96,33.45,28.49,28.42,35.24,11.4,25.85,36.71,32.79 +20.56,34.58,28.37,28.65,37.73,11.67,26.02,37.1,32.56 +19.53,35,26.22,25.79,35.08,10.64,23.89,34.66,29.75 +19.34,36.38,26.98,26.39,34,10.92,23.83,33.65,28.94 +25.23,36.38,26.1,26.39,33.66,10.93,23.83,35.71,28.89 +22.085,37.3,25.79,26.33,33.625,10.89,23.72,34.1,29.125 +18.94,38.22,25.48,26.27,33.59,10.85,23.61,32.49,29.36 +19.46,37.51,26.09,26.9,35.06,11.08,24.05,33.99,29.97 +19.04,37.28,27.33,27.32,34.77,10.96,24.88,34.85,30.81 +20.28,37.68,28.88,27.89,36.95,10.87,25.92,36.3,31.22 +19.22,35.97,27.55,26.62,34.82,10.76,25.12,36.4,30.34 +19.31,34.46,29.23,28.08,35.62,11.04,25.92,36.61,30.01 +18.52,33.86,26.99,26.34,34.25,10.28,23.8,34.55,28.06 
+18.03,34.01,26.73,26.64,34.46,10.25,24.45,35.24,29.26 +17.7,35.36,25.76,24.7,33.75,10.15,23.42,34.78,28 +17.35,34.93,26.04,24.11,32.09,9.8,22.98,34.38,27.1 +16.83,36.74,25.93,23.92,31.21,9.92,22.75,33.2,26.41 +16.88,35.33,26.49,24.65,31.61,10.16,22.73,33.37,28.12 +18.34,37.23,29.57,28.29,33.48,11.42,25.29,35.34,32.07 +19.6,36.27,30.44,29.24,34.47,11.87,26.81,36.31,32.42 +19.64,34.86,29.71,28.27,33.7,11.65,25.77,36.41,32.02 +19.37,34.1,28.06,25.99,32.92,11.07,23.41,34.85,28.89 +19.11,32.99,26.75,25.27,32.27,11.12,22.56,33.15,28.83 +19.43,33.09,27.23,26.22,33.34,11.6,23.87,33.13,29.59 +19.51,34.53,27.5,26.96,33.05,11.66,25.56,34.18,31.31 +19.01,33.98,27.81,27.48,32.55,11.89,24.56,35.16,31.71 +18.05,33.6,27.68,27.09,30.9,11.43,24.24,35.9,31.76 +16.65,33.23,25.45,23.9,29.76,10.12,22.02,33.68,28.53 +16.35,32.07,25.19,24.59,29.78,10.19,22.07,33.94,29.33 +16.48,32.15,24.69,24.29,29.47,9.8,21.82,32.81,29.15 +16.25,31.42,24.02,23.72,29.3,9.98,21.7,32.33,27.85 +16.19,30.78,24.05,23.55,27.45,9.94,21.39,31.88,27.73 +16.16,30.59,22.99,22.91,27.15,9.64,20.55,30.53,27 +15.13,29.45,21.97,22.23,25.5,9.27,18.92,30.25,25.26 +14.96,29.84,22.73,22.54,26.01,9.24,19.47,30.53,26.81 +14.87,31.18,22.25,21.62,25.77,9.56,19.12,28.83,26.34 +15.17,31.73,22.78,21.62,27.44,9.46,20.09,29.28,25.66 +14.7,31.19,20.92,20.79,26.28,9.32,18.55,27.58,25.03 +14.78,30.5,22.6,21.17,28.04,9.67,19.8,27.78,26.98 +16.29,30.64,23.25,22.18,27.42,9.6,20.93,28.11,26.9 +17.15,31.07,23.51,21.64,28.63,9.66,20.23,29.04,27.64 +17.94,31.76,23.33,22.64,31.54,9.73,21.63,30.08,27.61 +16.88,32.01,23.81,22.48,31.11,9.88,20.96,29.33,28 +16.9,33.42,25.39,24.04,32.25,9.92,21.4,30.05,28.89 +16.54,33.06,25.21,24.19,32.35,10.39,22.14,30.62,29.24 +16.5,32.59,25.74,25,31.97,10.83,22.78,30.16,30.97 +17.14,31.95,26.87,25.95,32.16,10.97,23.98,31.64,31.27 +17.89,31.4,25.93,25.27,33.22,10.8,23.36,31.46,30.28 +17.93,32.504999999999995,26.735,25.975,35.01,10.75,23.869999999999997,31.865000000000002,31.07 
+17.97,33.61,27.54,26.68,36.8,10.7,24.38,32.27,31.86 +18.18,33.64,27.11,26.28,35.12,10.68,24.3,32.21,31.11 +17.8,34.07,24.95,24.02,34.68,10,21.64,30.17,28.52 +18.95,32.77,23.47,22.29,35.4,9.07,19.99,29.55,28.49 +19.08,32.71,24.86,23.62,36.14,9.25,20.96,30.94,29.15 +19.59,32.38,24.81,23.73,36.12,9.5,21.31,32,29.16 +20.15,33.33,25.11,23.58,38.13,9.26,21.14,32.02,29.46 +19.03,32.96,24.62,22.55,35.73,9.15,20.31,31.54,26.91 +17.87,34.07,24.01,22.87,33.91,9.62,20.16,31.27,26.62 +18.08,35.64,23.8,23.2,34.88,9.58,20.41,31.82,27.27 +17.89,35.81,23.74,22.16,34.35,9.52,21.05,31.75,26.47 +17.69,36.58,22.58,22.18,33.76,9.1,21,31.88,26.05 +17.29,33.58,22.92,21.37,33.39,9.29,20.01,32,25.91 +17.09,33.21,21.62,20.24,34.06,9.29,19.52,31.65,25.16 +16.94,34.23,22.35,20.37,34.66,9.5,19.57,31.94,24.95 +16.74,34.35,23.57,22.84,33.03,9.48,20.1,33.31,26.89 +16.31,35.13,24.24,23.61,32.04,9.75,21.29,34.93,27.13 +17.02,33.05,23.31,21.81,31.41,9.47,19.97,33.83,26.07 +16.76,32.45,23.15,22.71,31.41,9.56,19.74,33.64,26.69 +16.9,32.87,24.2,23.31,31.27,9.58,20.81,33.36,26.59 +16.34,32.53,23.95,22.46,31.18,9.47,20.44,32.46,26.14 +16.59,35.15,23.94,22.18,30.57,9.45,20.67,32.84,25.82 +17.27,33.39,23.36,21.77,30.2,9.54,19.89,32.81,25.67 +15.88,32.76,22.74,21.17,29.28,9.03,19.74,32.76,24.93 +16.13,34.05,23.06,21.78,30.19,9.35,20.16,33.18,25.17 +15.72,34.5,24.44,23.2,30.08,9.34,20.87,34.18,25.39 +15.53,35.09,24.44,23.41,29.82,9.1,21.37,33.52,25.36 +15.31,34.52,23.69,22.27,28.97,8.9,20.33,32.53,24.41 +15.72,33.69,23.38,22.29,28.48,8.84,20.32,32.63,24.82 +15.47,33.84,23.21,21.48,28.82,8.95,19.83,32.65,23.54 +15.12,33.54,23.58,20.53,28.15,8.99,19.17,31.59,24.21 +14.64,32.25,23.67,20.03,27.26,9.03,19.07,31.24,23.76 +14.73,30.83,22.54,20.31,27.55,8.91,19.25,31.93,23.49 +15.1,31.91,23.1,22.35,29.16,8.95,20.46,33.06,23.8 +14.88,33.13,22.93,22.52,29.25,8.95,21.03,30.92,23.87 +15.8,34.28,25,24.8,30.63,9.11,23.2,32.82,26.33 +15.7,33.61,24.51,24.98,30.17,9.03,23.3,33.76,26.68 
+15.36,32.27,23.73,24.2,27.94,8.81,23,33.49,25.48 +14.92,32.21,24.29,24.12,27.54,8.71,23,33.99,25.1 +14.943333333333333,32.2,23.903333333333332,24.186666666666667,27.44333333333333,8.656666666666668,23.076666666666668,33.82333333333334,25.18 +14.966666666666667,32.19,23.516666666666666,24.253333333333334,27.346666666666668,8.6033333333333335,23.153333333333332,33.656666666666666,25.26 +14.99,32.18,23.13,24.32,27.25,8.55,23.23,33.49,25.34 +14.49,31.29,22.99,22.8,26.91,8.5,21.77,32.94,23.88 +16.2,32.42,23.66,22.61,29.69,8.45,22.57,34.22,24.65 +17.48,32.1,24.53,23.28,30.14,8.71,23.76,35.95,25.2 +18,32.02,23.65,22.84,30.55,8.64,23.01,35.32,24.67 +16.69,33.98,24.32,23.57,27.94,8.67,24.16,34.94,24.53 +17.02,33.51,25.61,24.95,28.69,8.45,24.8,34.06,25.81 +16.07,31.54,25.21,24.56,27.91,8.35,24.3,33.48,26.28 +15.66,30,24.26,23.37,27.62,8.35,21.95,32.32,24.55 +14.54,31.27,24.3,23.43,27.11,8.13,22.31,32.28,26.19 +14.94,31.5,24.7,23.8,26.58,7.59,23.09,37.33,26.65 +14.26,30.54,24.97,24.31,25.72,7.45,23.4,35.5,26.81 +13.28,31.1,24.28,23.63,25.43,7.42,21.89,34.79,25.03 +13.61,30.97,22.5,20.84,23.87,7.41,19.82,33,23.42 +13.52,29.83,22.57,21.31,22.76,7.19,19.71,30.8,23.1 +13.22,29.49,22.32,20.88,23.33,7.22,19.54,30.13,22.67 +14.350000000000001,29.49,22.32,20.564999999999998,23.33,7.22,19.54,30.13,22.090000000000003 +15.48,29.49,22.32,20.25,23.33,7.22,19.54,30.13,21.51 +13.18,29.38,22.94,21.44,24.68,7.66,20.21,30.95,22.24 +13.68,28.65,23.22,21.74,24.9,7.78,20.66,31.45,23.02 +13.7,28.69,23.65,22.16,25.04,7.77,20.25,31.13,23.46 +12.67,28.28,22.81,21.83,25.13,7.77,19.92,30.59,22.82 +13.24,27.62,22.46,22.49,25.94,7.85,20.11,31.32,23.29 +13.4,28.39,23.4,22.76,26.64,7.91,21.25,32.62,24.15 +13.95,28.84,23.53,22.8,26.35,8.02,22.05,32.12,24.12 +14,29.13,24.31,22,25.92,7.84,21.22,32.6,23.75 +14.07,29.89,23.27,21.8,25.51,7.83,20.71,32.3,22.95 +13.87,29.65,22.36,21.06,24.53,7.84,20.31,31.93,21.83 +14.2,30.32,23.95,21.43,25.71,7.68,20.25,32.82,22.04 
+14.16,29.94,23.04,21.12,26.14,7.58,20.29,31.83,21.83 +13.34,29.13,23.04,21.14,24.72,7.49,20,31.65,22.36 +13.01,29.63,23.45,21.22,26.54,7.42,20.36,32,22.93 +12.6,29.32,23.81,21,25.72,7.19,20.52,31.74,22.94 +11.97,28.95,23.21,20.67,24.53,7.01,20.21,31.27,22.98 +13.69,28.56,22.9,20.04,25.43,6.99,19.41,31.98,22.09 +13.73,27.64,23.52,20.28,25.75,7.43,20.99,31.65,22.56 +14.53,27.61,23.04,20.03,27.28,7.74,20.36,32.37,22.48 +13.82,28.88,24.19,20.53,27.32,8.22,21.78,29.76,22.59 +13.21,28.88,24.19,20.53,21.25,8.22,21.78,29.76,22.59 +13.385000000000002,28.805,24.085,21.12,24.5,8.445,21.97,30.54,22.615000000000002 +13.56,28.73,23.98,21.71,27.75,8.67,22.16,31.32,22.64 +14.83,28.08,23.66,21.26,27.99,8.74,22.82,31.28,22.98 +15.3,28.88,25.42,23.04,27.65,8.78,24.63,31.35,24.74 +14.38,28.45,24.53,21.7,28.08,8.97,22.49,39.12,23.67 +13.905000000000001,28.125,23.315,20.72,27.185,8.545,20.33,33.705,21.815 +13.43,27.8,22.1,19.74,26.29,8.12,18.17,28.29,19.96 +13.72,26.83,21.52,18.78,26.53,8.09,17.91,28.75,19.92 +13.78,26.03,20.41,17.82,26.61,7.76,17.18,27.65,19.38 +14.51,25.04,20.87,18.53,27.17,7.68,17.61,28.87,19.73 +14.08,24.3,21.06,18.23,26.59,7.51,17.36,27.91,19.64 +14.16,24.6,20.4,17.82,25.84,7.73,17.39,27.47,19.11 +13.27,24.59,20.29,17.45,25.78,7.64,16.96,27.29,19.02 +13.79,24.57,20.26,18.16,25.64,8.06,16.55,26.48,19.32 +13.78,25.72,19.39,16.54,26.59,8.19,16.43,26.21,18.66 +13.76,25.7,19.88,17.25,26.69,8.62,16.09,25.63,18.43 +14.49,24.39,20.66,17.24,27.24,8.56,16.23,26.24,18.96 +14.9,23.99,20.74,17,27.08,9.19,16.42,27.09,18.62 +13.8,21.98,19.16,15.85,25.38,9.19,15.01,25.76,17.48 +13.255,21.825000000000003,19.275,16.044999999999998,24.72,8.879999999999999,15.39,25.3,17.605 +12.71,21.67,19.39,16.24,24.06,8.57,15.77,24.84,17.73 +12.46,22.75,18.56,15.69,23.13,8.41,15.7,25.8,17.72 +13.03,22.48,18.87,16.21,22.72,8.59,15.96,27.38,18.39 +14.08,22.52,18.93,16.46,24.01,8.28,16.21,27.99,18.36 +14.39,22.98,19.63,18.19,25.17,8.23,17.31,28.78,19.62 
+13.59,22.97,19.72,18.05,23.57,8.17,17.02,27.12,19.5 +13.21,22.88,19.76,18.68,24.28,8.39,17.87,28.16,20.1 +13.76,22.54,20.19,19.24,24.83,8.2,18.52,28,19.71 +13.32,22.4,19.38,18.14,23.36,8.51,17.4,27.99,18.86 +13.78,23.96,20.69,19.95,24.4,9.28,19.21,28.01,20.39 +13.03,23.06,20.13,18.4,24.41,9.33,18.36,26.24,19.65 +13.57,23.39,19.74,18.75,24.17,9.69,18.25,25.93,19.52 +13.59,23.3,20.34,18.72,24.74,9.11,18.71,26.23,19.67 +13.14,23.08,19.41,17.85,23.61,8.67,18.11,25.68,19.01 +14.38,21.68,19.76,17.7,25.28,8.67,17.72,25.66,18.3 +14.06,20.7,18.81,16.62,25.1,8.74,17.42,25.83,17.71 +14.7,21,18.66,15.84,24.79,8.59,17.29,25.59,17.46 +14.72,20.21,18.78,15.84,25.32,8.87,17.67,25.84,17.46 +15.9,22.59,18.49,15.33,25.33,8.83,17.6,26.72,16.91 +15.825,22.515,19.025,15.165,26,8.905000000000001,17.15,27.395,16.79 +15.75,22.44,19.56,15,26.67,8.98,16.7,28.07,16.67 +18.59,24.97,20.44,16.47,29.22,9.09,18.66,31.72,18.14 +16.9,27.13,21.23,18.2,28.41,9.56,19.74,31.72,19.39 +16.26,24.58,21.04,17.67,28.95,9.6,18.64,31.67,19.31 +17.48,26.22,23.8,21.86,29.72,11.1,22.87,33.68,21.13 +15.61,25.9,23.4,20.25,26.4,10.43,21.33,32.01,20.36 +15.57,24.33,21.82,18.79,26.73,9.84,19.48,30.79,18.87 +15.79,23.61,21.21,18.42,26.51,9.72,19.5,30.76,18.71 +15.75,23.68,21.68,18.72,26.7,9.71,19.54,30.55,19.04 +16.03,24.28,22.77,18.91,27.2,9.53,19.48,32.05,19.27 +15.47,22.41,22.3,17.63,26.41,9.08,18.32,30.39,18.72 +15.05,22.52,21.59,17.37,25.12,8.9,17.86,30.08,18.29 +14.84,21.29,20.8,16.52,24.76,8.56,17.14,29.79,18 +13.54,20.8,20.53,15.26,23.22,8.5,16.46,29.6,17.45 +12.96,19.42,21.62,15.4,22.57,8.46,14.88,28.15,17.35 +12.07,18.77,21.02,15.77,21.64,8.33,15.09,28.33,17.31 +12.24,19.09,21.82,16.6,21.93,8.64,15.06,29.14,18.36 +12.59,18.36,21.08,16.11,22.22,8.52,14.61,27.94,18.37 +12.43,18.76,21.02,15.97,21.68,8.39,15.47,27.18,17.74 +13.3,19.45,22.39,18.53,22.51,9.4,17.81,27.97,19.29 +14.52,21.14,23.3,19.1,24.35,10.16,18.62,29.34,20.35 +13.33,19.58,22.95,18.09,22.68,9.5,17.3,28.93,20.14 
+13.37,19.74,23.89,20.15,22.95,9.97,18.38,28.7,21.65 +12.89,19.11,23.22,19.08,22.93,10.43,17.84,28.16,21.76 +12.99,19.08,22.81,18.69,22.25,9.99,17.64,28.84,20.51 +12.48,17.81,21.8,16.86,21.38,9.82,16.52,27.19,19.28 +12.33,17.95,21.3,16.63,20.9,9.92,16.7,27.24,18.5 +12.41,18.19,21.49,16.72,20.86,9.36,16.78,26.7,18.01 +12.77,18.695,21.95,17.45,22.005,9.515,17.03,27.465,18.590000000000003 +13.13,19.2,22.41,18.18,23.15,9.67,17.28,28.23,19.17 +13.73,19.33,22.03,17.77,24.16,9.45,17.41,29.26,19.07 +15.21,21.74,23.05,18.36,25.62,9.24,18.84,32.33,19.14 +16.03,21.84,22.84,19.2,26.01,8.88,19.22,31.9,19.51 +14.46,21.39,23.39,19.15,24.43,8.43,19.36,34.8,19.65 +14.22,21.01,23.74,18.75,23.59,8.54,18.79,31.86,19.6 +13.54,20.64,23.23,18.02,23.25,8.52,18.32,30.85,19.1 +15.15,20.55,22.84,17.67,24.67,8.62,17.99,32.05,19.42 +15.29,21.2,22.65,17.23,23.99,8.46,17.71,32.62,19.5 +21.32,22.59,22.79,17.92,31.18,8.52,18.09,35.93,19.73 +34.48,29.84,25.85,22,54.92,9.01,23.66,45.41,23.61 +30.02,27.32,24.44,20.09,43.77,8.69,20.81,42.14,21.54 +26.93,31.14,27.18,22.46,39.35,9.2,23.18,51.29,24.04 +24.57,28.53,26.29,23.2,38.38,9.13,23.86,50.02,24.16 +24.37,27.57,24.43,20.71,37.35,8.96,22.5,46.17,22.28 +23.28,25.07,24.39,19.68,36.04,9.19,20.88,43.66,22.18 +23.68,24.43,23.83,18.82,38.1,9.05,19.96,42.56,21.35 +21.49,23.26,23.24,18.79,34.67,8.79,19.65,40.34,20.77 +22.35,22.68,23.48,18.61,33.64,8.79,19.75,40.4,20.37 +24.21,22.51,23.24,18.88,36.4,8.84,19.73,41.62,21.24 +23.16,23.44,22.7,18.93,35.48,8.67,19.37,41.16,20.41 +22.09,24.57,21.93,18.53,34.29,8.68,19.31,39.9,20.02 +22.93,26.68,23.28,20.27,36.62,8.73,20.67,40.27,20.98 +22.06,24.49,22.66,19.29,33.73,8.75,19.91,51.46,20.74 +20.37,24.01,21.61,18.19,32.09,7.99,18.87,38.62,20.22 +20.37,25.16,21.95,18,31.35,8.29,18.94,38.73,20.09 +22.07,24.38,20.88,17.77,32.67,8.21,18.9,40.97,19.53 +20.64,22.64,21.2,18.08,32.05,8.12,18.87,37.6,19.6 +21.62,23.24,21.62,18.54,32.18,8.48,19.07,39.24,20.06 +23.95,22.83,21.39,19.59,32.48,8.74,19.03,41.2,20.59 
+25.04,23.66,21.35,19.36,33.59,8.58,18.52,43.07,20.41 +25.06,24.12,21.43,19.31,34.91,8.65,18.31,43.03,20.21 +28.01,24.48,20.77,19.41,41.88,9.13,18.77,45.58,19.84 +27.18,24.39,20.17,18.87,38.33,8.87,18.76,43.94,19.29 +28.34,22.82,19.66,18.32,38.88,9,18.28,43.55,18.06 +26.66,23.44,21.27,18,40.69,8.97,18.09,42.55,18.42 +27.18,23.11,20.98,17.49,42.21,9.06,18.62,42.39,18.25 +27.61,24.45,22.98,18.92,44.04,9.18,19,42.78,19.85 +26.08,23.3,23.3,20.41,41.11,9.17,19.84,42.21,20.48 +24.9,22.57,24.47,21.47,39.23,9.03,19.55,45.87,20.66 +24.88,22.759999999999998,24.08,20.814999999999998,38.91,9.149999999999999,19.5,44.114999999999995,20.95 +24.86,22.95,23.69,20.16,38.59,9.27,19.45,42.36,21.24 +22.76,24.04,24.65,21.78,36.27,9.4,19.92,42,23.77 +21.43,24.21,24.2,21.47,35.02,9.34,20.1,42.66,23.67 +22.63,24.41,25.22,23.68,36.02,9.45,21.23,42.58,26.78 +21.68,24.79,24.69,22.79,35.59,9.67,21.32,43.45,26.03 +22.32,24.64,24.83,23.34,35.17,9.37,21.64,42.77,26.59 +22.65,24.49,25.45,24.99,35.34,9.09,22.66,43.9,27.95 +22.33,23.89,26.04,24.83,35.19,9.06,22.52,42.69,27.92 +23.58,22.41,25.74,24.32,41.16,8.54,20.7,43.42,28.48 +22.68,21.74,25.48,23.76,36.49,8.82,20.72,41.73,28.04 +23.27,21.81,26.89,26.28,38.29,8.87,22.31,49.07,30.75 +22.04,22.72,27.97,27.98,36.82,8.99,23.43,43.42,32.59 +22.69,21.47,27.23,26.05,36.8,9.16,21.46,42.56,30.87 +20.91,21.22,28.48,26.97,34.85,8.77,21.97,41.92,31.35 +21.27,21.04,28.35,27.29,35.57,8.72,21.35,41.1,33.34 +22.7,20.64,28.24,26.19,36.8,8.89,20.73,43.75,32.67 +22.53,19.98,27.63,26.5,36.27,8.86,19.99,37.87,33.85 +29.94,25.9,35.18,35.48,47.77,9.61,24.93,44.43,40.35 +27.19,23.68,31.58,33.61,43.04,9.52,23.3,45.79,38.47 +27.69,24.32,35.21,38,43.05,10.03,23.89,50.24,40.02 +25.86,23.63,34.31,32.35,40.29,9.64,22.68,48.01,37.43 +31.43,22.82,31.69,28.52,45.7,9.94,20.98,50.26,35.14 +33.51,23.17,30.61,28.29,47.87,9.49,20.63,53.08,32.9 +31.84,22.37,29.62,27.61,46.26,9.41,20.61,53.26,32.25 +28.84,22.18,28.63,25.64,42.75,9.33,20.29,49.18,31.52 
+28.19,22.63,30.64,30.18,40.82,9.6,20.13,50.21,35.04 +27.25,25.7,31.93,30.77,40.71,9.67,20.08,48.83,35.36 +27.2,26.22,31.369999999999997,30.165,41.955,9.375,19.67,49.925,35.33 +27.15,26.74,30.81,29.56,43.2,9.08,19.26,51.02,35.3 +26.79,25.37,30.41,28.79,41.69,8.88,18.79,53.11,34.69 +25.68,25.22,28.83,25.69,39.59,9.15,18.07,50.21,32.11 +26.24,26.22,28.29,27,38.69,9.15,18.09,49.99,32.06 +23.85,24.58,27.43,25.6,35.89,9.71,17.31,46.15,30.6 +24.07,24.51,28.6,27.29,34.79,9.38,16.84,47.42,31.43 +23.53,23.72,27.75,26.21,34.85,9.69,16.69,49.15,28.89 +23.2,24.5,28.35,26.98,34.27,9.38,17.32,47.68,30.16 +23.61,23.3,28.3,25.98,36.22,8.76,16.87,47.16,29.79 +22.52,23.05,27.22,26.33,34.74,8.62,16.92,46.07,29.23 +21.19,24.08,26.3,24.7,33.94,8.45,16.29,44.34,28.91 +21.76,24.23,25.35,23.19,34.78,8.08,16.72,46.13,25.88 +21.66,23.63,24.05,22.09,33.92,8.14,16.85,45.55,25.42 +22.61,23.77,24.48,22.43,34.16,8.55,17.42,47.41,27.42 +21.77,23.37,23.71,21.51,33.57,8.51,17,46.74,25.77 +22.22,22.98,23.24,21.3,33.69,8.29,16.8,46.51,26.12 +22.77,22.79,24.76,23.08,34.55,8.77,17.89,47.65,27.29 +23.72,23.48,24.8,23.21,34.98,9.12,17.92,47.85,27.58 +24.99,22.95,24.59,23.26,36.13,8.92,18.05,47.92,27.57 +24.61,22.28,23.96,22.04,35.63,8.74,17.27,47.08,26.54 +24.24,22.26,23.53,20.84,33.45,8.13,16.57,46.36,25.71 +23.49,22.87,22.91,20.45,32.83,8.18,16.52,45.56,26.53 +23.82,22.64,23.25,22.12,33.49,8.05,17.1,48.18,26.77 +23.49,22.56,24.56,23.35,32.9,8.03,17.25,49.17,26.95 +21.71,22.25,24.91,23.06,32.17,7.74,16.53,47.96,26.95 +21.5,22.64,24.04,22.81,33.29,7.59,16.88,46.06,26.27 +21.58,22.68,22.92,21.74,34.19,7.56,15.82,47.24,27.84 +21.37,22.63,23.68,21.43,33.55,7.84,15.67,46.16,27.6 +21.21,23.15,23.57,21.92,33.82,7.67,16.4,47.46,28.25 +22.45,24.15,24.21,23.1,39.62,7.87,17.81,49.52,28.74 +22.97,23.99,24.61,23.62,38.95,7.69,17.38,48.7,30.04 +24.25,23.48,24.93,24.57,38.14,7.94,18.65,48.79,31.76 +23.82,24.18,25.37,24.45,37.99,8.13,18.2,47.55,31.35 +23.28,24.33,26.38,26.02,37.91,8.33,18.77,47.83,31.64 
+22.69,23.26,25.31,24.62,36.58,8.22,18.12,47.28,30.56 +22.63,22.7,24.77,23.24,38.77,8.14,17.18,46.59,29.01 +23.67,23.43,25.29,25.25,40.06,8.38,18.7,47.57,31.57 +23.9,27.19,27.21,28.55,40.93,8.53,19.91,49.56,32.72 +25.89,29.79,27.93,28.21,41.16,8.63,19.36,49.59,32.41 +24.36,27.41,28.13,28.01,40.32,8.9,20.57,48.22,32.61 +24.44,28.1,27.47,28.14,40.87,8.92,21.19,47.42,32.18 +24.82,28.755000000000003,27.835,28.25,41.55,8.96,21.05,47.495000000000005,32.075 +25.2,29.41,28.2,28.36,42.23,9,20.91,47.57,31.97 +24.61,28.73,27.44,26.68,41.95,8.63,19.88,45.02,30.67 +25.64,27.44,26.55,25.89,41.81,8.67,19.31,46.95,29.26 +23.59,28.1,26.38,26.22,39.78,8.07,19.48,45.11,29.49 +22.04,27.93,26.69,26.29,38.24,8.06,19.19,45.58,29.53 +22.58,27.12,25.69,25.89,38.77,7.82,18.76,45.94,29.65 +22.63,24.32,25.09,25.05,37.11,7.78,17.16,44.88,28.42 +25.09,23.57,24.98,25.16,40.73,7.73,17.64,46.89,27.96 +24.88,23.18,25.07,24.29,40.78,7.8,17.25,45.53,27.57 +25.12,23.03,25.55,24.35,40.78,7.85,17.71,46.25,28.56 +25.35,23.17,26.01,24.26,40.97,7.83,17.43,47.17,29.47 +23.2,22.81,25.03,22.87,41.13,7.17,17.12,46.95,26.96 +21.75,21.51,23.71,21.81,37.57,7.48,16.41,48.02,26.33 +22.27,21.38,24.8,23.55,38.14,7.72,16.46,50.33,27.55 +22.7,22.27,24.38,23.09,38.95,7.62,17.03,49.28,26.27 +22.84,22.5,24.21,22.36,37.96,7.6,16.92,48.75,25.37 +21.97,22.32,23.67,22.49,37.09,7.53,16.83,47.22,25.39 +21.79,21.77,22.9,22.42,36.21,7.39,16.82,46.3,25.17 +22.57,21.84,23.07,23.44,36.39,7.51,17.7,45.94,26.05 +23.72,22.98,25.51,25.73,38.03,7.95,19.55,46.6,27.66 +25.48,22.57,25.38,24.44,38.96,8.09,18.96,47.88,26.81 +24.04,21.52,24.87,24.71,37.64,7.88,18.92,48.01,26.46 +24.54,21.62,25.24,25.41,37.73,7.89,19.85,47.94,27.37 +24.77,21.49,24.83,24.41,36.98,7.83,19.28,46.77,26.92 +24.81,22.01,26.73,27.11,38.29,8.04,21.52,46.81,28.55 +25.44,21.95,27.59,28.75,38.1,8.05,23.26,48.25,29.25 +25.81,23.24,27.03,28.08,38.47,8.08,23.02,48.38,28.85 +25.36,21.39,25.38,24.97,37.52,7.53,19.94,46.25,27.1 
+25.83,21.55,24.63,23.73,37.97,7.32,18.85,45.76,25.45 +26.45,22.52,24.88,24.2,38.31,7.42,19.14,45.53,25.69 +26.07,23.52,26.31,25.61,38.09,7.41,20.44,45,26.38 +24.72,21.73,24.46,23.25,36.62,7.62,17.53,42.98,24.92 +22.31,21.22,22.69,21.75,35.71,7.43,16.45,42.7,22.97 +21.43,20.12,21.46,19.98,34.96,7.41,16.16,42.27,22.09 +21.52,20.91,20.95,18.86,35.01,7.44,16.64,40.19,21.78 +20.75,21.62,21.65,19.26,34.59,7.39,17.09,41.6,21.45 +20.52,23.28,23.52,21.58,33.66,7.41,18.11,42.11,23.25 +20.67,21.88,24.4,21.5,33.19,7.4,17,43.91,23.98 +19.9,20.28,23.98,21.01,32.66,7.47,16.21,42.91,23.82 +20.32,20.57,23.86,20.86,32.71,7.59,16.71,43.53,24.03 +20.65,20.64,22.94,20.82,32.07,7.74,16.39,42.2,23.59 +20.81,20.95,23.15,21.59,31.32,7.67,16.98,41.31,23.65 +20.57,20.59,22.38,21.83,31.93,8.12,16.92,42.12,24.44 +20.68,21.29,23.15,21.67,31.22,8.36,17.21,42.82,25.42 +20.3,21.74,22.62,21.91,30.94,8.33,16.67,42.26,25.72 +20.19,22.18,24.27,23.83,30.43,8.61,16.93,42.01,27.59 +19.43,20.98,23.94,23.46,30.23,8.59,16.79,40.91,27.59 +20.01,20.79,24.63,24.79,30.44,8.24,18.29,41.88,28.85 +19.37,19.7,23.96,23.83,30.23,7.82,17.07,40.3,28.59 +19.15,19.69,23.84,23.48,30.47,7.81,16.51,42.09,28.51 +21.2,20.32,23.08,23.45,33.46,7.84,17.36,41.64,28.65 +20.46,19.63,24.04,23.05,33.74,7.65,16.53,41.13,27.75 +19.44,19.92,23.35,21,32.92,7.47,16.38,40.1,26.79 +18.03,19.39,24.93,21.74,30.02,7.2,16.1,38.98,26.78 +19.52,19.71,28.17,23.74,31.42,7.11,16.81,39.1,27.42 +19.45,19.83,27.11,23.34,31.02,7.25,16.92,38.33,28.01 +23.04,19.87,26.72,24.12,34.17,7.66,16.75,41.6,28.94 +21.3,18.69,26.43,24.16,32.25,7.52,16.21,41.02,29.05 +20.33,18.1,25.6,23.36,30.27,7.45,15.34,39.99,28.63 +20.02,19.23,25.46,24.38,30.04,7.76,16.24,41.63,29.09 +19.68,18.64,25.22,23.67,29.48,7.64,16.22,42.19,28.87 +19.75,19.62,25.51,23.17,30.26,7.89,16.61,41.03,28.97 +19.54,19.810000000000002,25.020000000000003,22.96,30.200000000000003,7.855,16.68,41.215,28.384999999999998 +19.33,20,24.53,22.75,30.14,7.82,16.75,41.4,27.8 
+21.43,20.28,25.37,24.8,34.88,8.08,17.33,45.5,28.91 +21.09,19.49,26.08,26.11,34.34,8.13,17.9,47.04,29.16 +20.82,19.06,26.18,26.02,33.31,8.13,18.13,46.89,28.82 +21.78,18.59,26.04,26.49,34.25,7.84,18.77,47.15,29.18 +20.5,17.95,25.4,25.02,32.31,7.33,17.59,45.61,27.55 +20.65,18.1,25.97,24.87,31.91,7.43,17.19,42.99,27.02 +19.63,17.95,24.58,24.44,32.43,7.42,17.29,42.74,26.56 +20.02,17.98,25.37,26.01,31.91,7.47,18.47,43.34,27.9 +22.24,18.19,25.4,25.6,33.5,7.72,18.25,44.34,27.9 +21.87,18.42,24.31,25.33,33,7.7,18.63,43.45,27.46 +21.91,17.49,24.43,25.38,35.41,7.67,18.17,43.81,27.24 +21.79,17.7,24.59,24.94,34.61,7.6,18.31,43.41,27.81 +21.39,17.26,22.56,21.16,32.54,7.35,15.83,42.33,26.06 +22.07,16.7,23.54,20.97,33.51,7.09,15.48,40.25,24.88 +21.01,15.86,22.31,21.53,32.88,7.29,15.04,38.8,24.59 +20.76,15.42,21.62,20.44,31.5,7.37,14.44,39.67,23.74 +20.08,15.26,21.62,19.95,30.89,7.34,14.48,37.53,23.13 +19.96,15.23,21.735,20.345,30.759999999999998,7.475,14.695,37.565,23.14 +19.84,15.2,21.85,20.74,30.63,7.61,14.91,37.6,23.15 +19.1,15.74,21.67,20.5,30.39,8.05,14.74,37.45,22.67 +20.75,16.53,21.25,21.22,31.94,8.21,15.61,38.12,23.44 +21.65,17.76,21.65,21.72,33.87,8.17,16.02,40.02,23.39 +21.035,19,23.07,23.134999999999998,32.875,8.305,16.515,40.32,24.79 +20.42,20.24,24.49,24.55,31.88,8.44,17.01,40.62,26.19 +19.19,20.6,24.78,24.21,31.07,8.05,16.45,38.91,25.2 +19.5,20.7,24.3,24.24,31.28,7.45,16.09,37.93,25.9 +19.12,19.67,23.46,23.01,31.48,7.27,15.39,37.08,24.48 +19.43,20.17,23.51,23.6,31.73,7.37,15.66,36.55,24.61 +18.46,19.51,24.47,23.45,30.51,7,15.71,36.14,25.67 +17.27,19.47,23.32,22.1,29.09,6.6,15.48,35.1,24.64 +16.67,19.75,23.44,22.3,29.6,6.3,15.51,35,24.3 +16.5,19.81,21.94,22.25,29.05,6.29,14.57,35.13,24.14 +16.43,18.7,22.33,20.79,28.88,6.47,14.57,34.81,23.78 +15.39,17.91,22.67,21.93,28.93,6.53,14.09,33.44,23.94 +14.34,17.12,22.22,20.25,27.87,6.78,14.15,33.92,23 +15.155000000000001,17.69,22.5,20.705,28.53,6.895,14.21,34.144999999999996,23.905 
+15.97,18.26,22.78,21.16,29.19,7.01,14.27,34.37,24.81 +15.62,18.17,21.74,20.4,28.96,6.84,13.53,33.2,23.86 +16.01,18.79,25.12,23.31,30.13,6.91,15.04,34.94,25.57 +17.54,19.46,27.31,28.26,31.79,7.07,16.93,36.98,28.28 +18.38,20.89,28.59,29.43,31.98,7.2,18.11,37.48,29.68 +18.71,20.83,29.46,29.51,31.37,7.19,17.9,37.1,29.43 +17.45,20.81,28.03,30.92,30.72,7.25,18.48,37.89,30.1 +17.97,19.76,28.51,28.38,31.02,7.65,18.21,37.32,29.28 +18.62,20.35,29.42,28.9,31.26,7.9,20.07,36.87,30 +19.15,21.65,31.44,31.16,31.27,8,21.52,38.36,31.95 +18.51,21.47,30.61,29.13,29.7,7.92,20.52,37.24,31.29 +18.52,20.65,31.94,29.23,30.21,7.78,21.28,37.31,31.13 +17.93,19.65,30.09,27.26,30.28,7.35,19.28,37.06,30.49 +16.59,19.03,28.8,26.47,26.86,6.93,17.8,36.98,28.78 +17.08,19.18,29.19,26.5,28.25,6.98,17.97,39.19,29.76 +16.81,19.05,28.17,25.13,29.13,7.07,16.8,37.43,28.5 +16.23,19.23,28.14,25.54,28.49,7.12,16.79,36.79,29.8 +16.13,19,26.73,24.6,29.29,6.98,16.85,36.52,29.1 +16.26,17.95,25.98,23.97,31.07,6.77,15.97,36.67,29.53 +16.270000000000003,18.189999999999998,26.215,24.63,31.314999999999998,6.8,16.185,37.355000000000004,30.78 +16.28,18.43,26.45,25.29,31.56,6.83,16.4,38.04,32.03 +16.48,18.35,26.68,25.22,31.57,6.8,16.6,38.01,31.66 +16.07,17.59,26.81,24.25,31.08,6.86,16.39,37.58,31.16 +15.73,17.9,26.73,23.45,32.03,6.69,16.55,37.06,30.29 +15.94,18.16,27.02,23.24,30.72,6.84,16.26,36.74,30.41 +15.86,18.47,26.97,23.8,29.76,7.01,16.55,36.3,30.79 +16.46,18.48,26.57,23.9,30.83,7.11,16.8,36.08,30.56 +15.75,18.81,26.3,23.44,28.84,7.3,16.7,35.7,29.37 +16.07,18.71,26.63,25.04,27.91,7.05,16.93,35.37,30.25 +17.05,20.9,27.95,27.98,29.54,7.54,18.3,36.79,33.39 +16.75,19.88,26.69,24.81,28.18,7.42,17.24,35.73,32.1 +16.58,20.29,27.3,24.25,28.19,7.34,17.5,34.94,29.89 +16.31,19.96,25.99,24.42,28.65,6.95,16.99,35.59,30.05 +15.82,20.01,25.65,25.65,27.94,6.69,16.68,35.59,31.94 +15.83,20.33,27.41,26.02,28.16,6.67,17.02,35.51,33.66 +16.14,20.67,27.88,26.11,28.79,6.44,17.68,35.45,33.42 
+16.67,20.9,28.01,25.45,29.59,6.45,17.56,35.59,31.9 +17.3,20.83,29.4,27.29,29.62,7.02,18.6,36.43,32.69 +18.32,21.7,29.1,28.06,30.98,7.09,19.43,36.06,33.63 +17.87,20.34,27.54,26.5,30.84,6.89,17.89,35.06,31.65 +17.37,20.25,25.43,24.37,31.33,6.77,16.53,35.15,29.67 +17.59,19.88,25.97,25.03,29.73,6.78,16.66,34.99,29.54 +16.91,19.69,25.64,23.53,28.63,6.96,16.53,34.86,28.79 +16.43,19.46,26.25,24.51,28.19,6.61,15.92,35.67,29.49 +17.53,19.52,26.74,24.5,29.06,6.81,16.4,37.08,30.1 +17.16,19.19,26.43,23.07,28.6,6.95,15.62,36.31,29.03 +17.32,19.17,26.23,23.58,28.73,6.95,15.77,36.29,28.58 +17.6,19.28,26.36,23.45,28.67,7.45,16.48,36.64,28.46 +17.48,19.15,26.16,23.38,27.23,7.09,15.64,35.15,28.16 +17.37,19.26,24.71,22.56,27.09,7.11,15.73,36.35,28.05 +16.65,19.61,23.92,21.09,25.65,6.83,14.85,35.67,26.77 +16.19,19.79,23.25,20.63,25.31,6.92,14.81,35.72,26.24 +16.01,18.45,22.74,19.89,24.74,6.63,15.42,35.15,27.09 +15.81,18.28,23.31,21.29,25,6.24,16.17,35.18,27.82 +15.95,18.64,23.67,21.71,25.31,6.19,17.49,36.06,28.94 +15.99,18.89,23.03,21.94,25.41,6.11,17.19,36.17,29.26 +14.83,18.55,22.62,21.31,24.91,5.87,16.36,36.15,29.4 +15.06,18.72,23.9,22.93,25.52,5.76,17.3,36.09,30.38 +15.19,18.39,24.11,23.02,25.83,5.89,18.15,35.7,30.35 +15.34,18.71,23.25,22.61,26.08,6.29,17.41,35.31,30.78 +16.1,18.71,23.94,24.01,27.43,6.3,17.03,35.12,32.19 +15.44,18.9,22.49,22.62,26.28,6.05,16.42,34.83,30.27 +15.96,18.93,21.74,21.92,25.34,5.98,15.59,33.34,30.43 +16.09,18.605,22.125,21.995,25.619999999999997,6.18,15.959999999999999,33.2,30.310000000000002 +16.22,18.28,22.51,22.07,25.9,6.38,16.33,33.06,30.19 +16.19,18.81,22.05,21.39,26.73,6.16,16.48,32.82,30.1 +15.61,18.46,22.07,21.3,25.04,6.04,16.24,31.64,28.92 +15.86,18.36,21.34,20.73,24.99,5.81,16.58,32.34,27.67 +16.07,18.88,21.81,21.6,24.27,5.8,17.14,31.77,28.86 +15.56,19.14,22.76,21.91,24.6,6.19,16.88,32.33,29.25 +15.31,19.19,22.17,20.66,24.24,6.25,16.77,31.87,28.04 +15.52,19.37,21.02,19.74,26.16,5.9,16.82,32.25,27.17 
+15.15,18.44,21.33,19.35,26.78,5.8,16.46,31.44,26.66 +14.85,17.8,20.61,18.51,26.43,5.79,15.94,30.94,25.51 +15.51,18.55,21.15,19.48,26.3,5.84,16.08,31.18,26.44 +15.02,17.96,21.21,19.64,25.52,5.88,15.96,30.99,26.04 +15.4,17.71,21.19,19.22,26.23,6.11,15.9,30.89,25.71 +14.87,17.49,21.07,18.49,25.54,6.02,17.11,30.43,25.35 +14.76,17,20.08,18.6,25.75,6.17,16.8,29.25,24.95 +14.04,16.65,18.64,16.75,25.57,5.86,15.82,28.26,23.15 +13.61,16.86,18.62,15.74,23.8,6.29,15.21,26.8,22.58 +13.76,16.72,19.14,16.65,25.15,6.15,15,26.73,22.73 +14.57,15.94,19.64,17.34,24.37,6.6,15.98,26.51,23.24 +14.04,15.42,19.73,16.93,23.7,6.27,15.72,25.71,22.82 +14.23,15.9,19.85,16.31,23.74,6.24,15.21,24.35,22.99 +14.07,16.2,19.84,17.74,23.02,6.29,15.22,24.64,24.64 +14.36,16.38,19.81,17.22,23.47,6.59,14.31,24.42,24.24 +13.98,16.05,19.25,16.07,23.25,6.63,14.54,23.8,23.47 +14.52,15.89,18.9,15.58,22.74,6.72,14.31,23.25,23.35 +15.365,15.995000000000001,19.03,16.225,23.475,6.775,14.34,24.595,24.165 +16.21,16.1,19.16,16.87,24.21,6.83,14.37,25.94,24.98 +15.8,16.02,19.42,17.07,23.37,6.85,14.17,27.76,23.68 +15.01,15.55,19.4,16.71,22.82,6.86,13.85,28.07,23.65 +15.34,15.61,19.7,17.6,23.69,6.64,14.29,28.06,24.75 +15.55,15.52,20.52,18.01,23.19,6.87,14.56,27.49,25.81 +15.05,15.53,20.99,18.27,22.61,6.92,14.76,25.57,25.38 +14.79,15.41,20.57,18.26,21.86,7.45,14.77,26.03,25.41 +14.15,15.14,20.37,17.94,21.86,6.25,14.33,26.3,24.94 +13.22,14.5,19.55,16.33,20.23,5.41,13.91,26.65,24.26 +12.77,14.67,19.16,16.83,19.32,5.65,14.28,26.66,24.19 +12.37,14.74,18.81,16.51,18.33,5.81,14.21,26.75,24.48 +12.36,14.67,19.05,16.83,18.47,5.82,14.03,25.95,24.5 +12.91,19.17,19.59,17.33,19.08,5.68,15.3,26.79,24.9 +12.52,19.47,19.42,16.81,19.18,5.56,15.62,25.62,24.49 +12.41,19.52,19.8,17.76,19.6,5.61,16.2,25.98,24.83 +12.53,17.4,19.28,17.66,19.65,5.6,15.86,26.57,24.67 +12.11,17.28,18.41,16.52,19.85,5.33,14.95,26.38,24.87 +14.48,18.05,18.35,15.95,24.64,5.09,13.98,27.84,25.13 +12.79,18.84,17.59,15.08,22.23,5.27,13.73,27.25,25.04 
+12.93,18.05,18.29,15.69,21.9,5.23,14.29,27.66,25.35 +12.96,17.93,17.76,15.76,22.27,5.17,15.67,27.74,24.35 +12.92,17.7,17.78,15.06,22.39,5.08,15.53,27.86,24.79 +12.76,16.97,17.3,15.18,22.1,5.17,15.58,27.09,24.81 +12.64,16.44,17.02,14.91,21.78,5.03,15.38,26.56,24 +14.01,16.52,16.97,15.11,23.33,5.03,15.62,27.52,24.92 +13.74,16.24,16.93,14.23,22.48,4.95,14.92,26.85,24.45 +13.89,15.87,17.55,14.21,22.49,4.92,14.7,26.36,24.69 +12.99,15.59,17.28,13.71,21.75,4.69,14.28,25.85,23.97 +12.99,15.975,17.605,13.815000000000001,21.740000000000002,4.79,14.745000000000001,26.475,24.244999999999997 +12.99,16.36,17.93,13.92,21.73,4.89,15.21,27.1,24.52 +12.83,16.31,17.93,15.05,21.99,4.93,15.5,27.57,24.6 +13.05,16.36,18.01,14.62,22.06,4.94,14.94,27.86,24.77 +12.91,16.46,17.93,15,21.95,5.04,15.8,27.65,24.62 +13.27,17.26,17.98,14.37,21.6,4.94,16.06,28.08,23.85 +14.23,18.1,18.07,13.74,22.54,4.73,15.26,28.88,23.92 +14.73,17.96,17.75,14.8,22.17,4.84,15.11,28.64,23.86 +14.67,16.88,17.94,14.17,22.2,5.22,14.42,28.65,23.92 +15.52,17.87,18.7,17.33,23.58,5.13,16.53,28.81,25.57 +14.35,17.39,18.04,15.7,21.67,5.04,15.63,28.39,24.97 +14.69,16.97,18.49,16.01,21.84,5.08,15.44,28.14,25.86 +14.62,17.1,18.45,15.42,21.56,5.46,15.25,27.34,25.86 +14.07,16.73,18.71,15.6,21.11,5.24,15.1,28.19,25.15 +14.33,15.95,19.88,15.94,22.45,5.18,14.95,27.91,24.55 +14.68,16.67,19.68,16.57,22.55,5.04,15.38,28.72,24.62 +14.55,16.37,20.92,16.75,22.44,5.18,16.12,28.85,25.41 +15.05,16.79,21.3,17.38,22.85,5.32,16.06,28.68,25.88 +14.85,17.11,21.25,17.96,22.43,5.5,16.53,28.85,26.23 +15.46,18.78,21.43,20.56,21.85,5.44,19.24,29.51,28.17 +14.37,19.39,21.55,20.76,20.64,4.94,19.83,28.91,28.34 +14.17,18.92,21.49,19.63,20.37,4.89,18.58,29.42,28.42 +14.76,19.1,22.15,21.03,21.41,5.09,20.51,30.2,29.34 +14.51,18.81,22.1,21.7,21.19,5.18,20.4,29.72,29.1 +14.82,18.85,22.86,22.37,20.77,5.27,20.36,29.88,29.14 +14.85,18.88,22.72,20.47,20.74,5.35,19.8,29,27.51 +14.49,17.54,22.7,19,20.67,4.88,19.22,28.67,24.09 
+14.35,17.55,22.04,18.89,20.43,4.86,19.13,28.21,25.09 +13.53,17.19,21.08,17.94,20.17,5,17.71,26.74,26.15 +12.55,17.86,20.12,17.15,18.64,4.97,16.44,25.32,26.31 +13.33,18.59,19.42,17.5,19.82,5.19,16.71,25.95,26.85 +13.46,19.52,19.41,15.86,19.61,5.11,15.82,24.87,24.25 +13.43,19.38,19.18,15.19,19.43,5.42,14.43,23.61,23.41 +13.51,17.96,19.15,15.33,19.34,5.46,14.38,23.55,23.5 +13.84,17.25,19.2,15.28,19.66,5.54,14.13,24.47,24.29 +13.46,16.51,18.66,14.84,19.68,5.31,14.39,23.78,24.4 +13.55,16.92,18.51,14.51,20.36,6.05,13.98,24.37,25.51 +13.24,16.68,18.18,14.5,19.83,6.2,13.79,24.16,25.44 +12.72,16.47,18.03,14.42,19.33,6.28,13.99,24.22,25.05 +12.9,16.52,17.96,15.37,20,6.32,14.21,24.46,26.35 +12.85,16.68,17.33,15.4,19.59,6.07,13.89,24.07,26.68 +13.45,17.509999999999998,17.36,15.745000000000001,19.975,6.415,14.575,24.715,27.435000000000002 +14.05,18.34,17.39,16.09,20.36,6.76,15.26,25.36,28.19 +13.44,18.49,18.9,16.49,19.62,6.59,14.9,25.17,28.39 +14.01,18.95,18.98,16.74,20.12,7.03,15.96,26.78,28.23 +13.27,18.52,19.27,16.05,19.14,6.75,15.51,26.17,28.15 +14.47,20.43,19.34,16.89,20.29,7.09,16.39,27.97,30.11 +15.36,19.68,19.76,18.44,19.79,7.85,16.74,28.33,32.18 +15.89,20.49,20.09,18.64,20.8,8.07,17.17,28.24,32.47 +16.42,20.63,20.29,19.23,22.56,7.82,17.16,28.6,33.2 +17.86,19.53,20.77,20.04,22.89,7.77,17.81,28.82,34.76 +16.5,20.17,21.74,20.9,21.98,8.02,18.12,29.34,36.33 +16.58,18.93,22.64,20.44,21.02,7.85,17.39,29.13,36.15 +17.14,19.18,21.72,18.82,20.93,7.22,17.03,28.37,36.41 +15.5,18.98,21.31,18,20.68,7.25,17.24,27.83,36.74 +16.9,18.56,20.72,17.4,26.64,7.19,16.95,29.28,37.23 +16.44,19.98,22.12,19.13,27.91,6.89,18.45,31.53,39.24 +16.65,20.73,21.22,19.34,27.58,6.9,18.99,30.6,42.5 +17.21,19.8,21,18.6,29.38,7.14,18.91,31.04,40.27 +17.99,20.07,21.96,20.26,33.37,7.59,19.92,32.62,41.15 +17.84,19.42,21.41,20.29,31.39,7.73,19.11,32.97,39.71 +17.17,19.86,23.76,22.52,30.41,7.43,20.27,33.97,45.6 +18.83,21.52,23.99,21.95,36.99,7.56,21.34,33.81,44.24 
+18.73,22.38,25.08,23.66,35.34,7.72,22.45,33.31,43.73 +18.2,23.36,25.87,22.9,34.42,7.16,22.45,33.44,43.49 +19.13,24.22,24.89,21.13,31.21,7.61,22.23,33.61,43.93 +17.6,24.62,24.97,20.67,29.5,7.76,21.99,33.79,51.03 +17.56,25.09,24.78,22.1,29.84,8.06,22.72,35.28,54.73 +17.5,25.28,24.4,21.28,29.61,7.43,22.94,36.88,57.61 +17.86,27.72,24.92,23.08,30.94,7.56,26.05,39.88,56.24 +18.11,30.03,26.05,24.64,30.79,7.79,29.32,41.3,54.42 +17.69,31.32,26.83,26.37,31.49,7.32,35.72,44.88,66.7 +17.98,37.23,27.99,25.71,29.36,7.49,37.32,44.14,66.56 +20.04,36.36,29.13,31.9,32.79,8.43,37.56,45.28,66.82 +19.68,34.09,30.5,29.64,32.14,8.82,38.24,45.82,68.51 +18.08,31.16,26.91,27.34,31.07,8.5,33.62,43.6,70.03 +17.27,29.36,25.76,25.49,29.02,8.13,30.46,40.65,72.83 +17.01,27.14,24.44,23.18,26.19,8.27,25.77,38.08,67.66 +16.58,30.68,23.82,22.83,27.6,8.13,26.92,38.26,61.17 +16.91,31.78,23.46,22.12,27.29,7.98,25.27,39.04,53.6 +16.24,31.27,23.39,21.09,27.2,7.86,26.2,38.51,58.16 +16.49,32.44,22.68,20.05,27.7,7.75,28.65,38.79,31.3 +16.3,30.11,22.84,20.31,28.47,7.25,25.78,39.09,31.18 +16.75,28.93,22.99,20.38,29.13,7.42,25.04,38.74,33.2 +18.08,30.45,22.44,19.93,33.96,7.64,25.44,42.29,33.87 +21.54,30.52,22.72,18.87,38.4,8.21,24.4,47.19,34.22 +21.1,31.84,23.39,20.48,37.27,8.85,25.47,47.88,34.82 +20.1,33.13,23.12,20.37,36.28,8.95,27.76,48.53,35.33 +23.38,31.39,23.35,20.52,41,9.37,26.45,49.31,35.72 +23.18,30.34,23.28,20.87,39.23,9.35,25.82,51.71,36.72 +21.38,29.47,22.94,18.75,35.91,8.32,23.72,49.47,35.76 +22.23,30.72,23.61,18.48,33.55,8.28,24.46,51.23,35.07 +21.43,30.79,23.81,18.58,35.05,8.39,23.87,49.08,32.83 +21.29,30.71,24.16,19.11,34.67,8.64,24.2,48.82,32.95 +20.98,33.12,25.1,19.21,32.02,8.44,25.58,48.03,32.65 +23.36,32.99,25.78,18.55,36.32,8.67,25.22,50.8,33.88 +23.37,34.39,24.44,19.6,37.14,8.86,25.61,49.44,34.74 +23.97,33.17,23.87,19.25,36.09,8.65,25.84,47.48,34.41 +24.26,33.14,22.68,18.95,37.84,8.3,25.87,52.82,35.39 +21.5,33.64,21.43,19.2,33.86,8.55,25.07,51.63,35.81 
+21.23,34.92,23.25,17.52,32.41,9.24,24.14,49.8,35.61 +20.64,36.28,23.29,18.42,31.33,9.25,23.99,49.53,36.47 +20.67,37.49,22.41,17.23,30.86,9.16,25.34,47.76,35.32 +21.24,36.02,24.24,17.88,31.2,9,26.51,46.49,35.11 +23.205,36.230000000000004,24.055,18.59,34.805,9.01,27.19,48.480000000000004,34.695 +25.17,36.44,23.87,19.3,38.41,9.02,27.87,50.47,34.28 +22.33,37.49,24.43,20.49,40.68,9.24,30.03,51.06,36.11 +21.29,38.57,25.84,20.2,36.82,9.86,28.26,50.64,35.01 +20.62,36.16,26.08,19.84,35.15,10.06,26.73,50.02,32.45 +20.29,35.39,28.88,19.68,34.93,8.87,27.73,49.28,32.43 +20.02,34.2,29.28,18.21,33.34,8.72,27.5,48.64,31.66 +19.62,39.68,28.47,19.68,33.65,8.74,31.64,50.63,33.26 +20.04,37.56,28.64,21.38,32.69,9.04,31.48,49.22,33.7 +19.27,42.21,27.67,22.93,33.57,9.16,36.31,51.86,34.01 +19.11,44.95,27.3,23.65,33.35,9.35,36.21,52.38,34.78 +19.66,49.77,26.73,24.24,33.92,9.2,39.43,54.11,35.38 +20.95,57.55,26.32,25.68,35.87,8.99,40.1,57.12,39.94 +22.08,55.77,26.65,26.63,39.84,9.24,40.42,56.85,41.27 +20.9,56.75,28.15,25.63,39.3,9.2,39.58,55.37,39.46 +19.28,57.45,26.12,22.5,34.88,9.85,35.51,51.7,37.34 +18.68,54.84,25.84,20.61,33.61,9.19,30.43,51.25,34.79 +20.26,54.82,26.03,20.23,34.74,9.63,30.46,52.2,33.9 +19.76,53.97,25.06,19.59,33.28,9.71,27.31,51.44,33.25 +19.37,54.88,25.18,19.47,32.98,9.49,27.51,51.56,32.7 +19.145000000000003,54.78,26.14,19.134999999999998,32.575,9.575,27.18,51.120000000000005,32.815 +18.92,54.68,27.1,18.8,32.17,9.66,26.85,50.68,32.93 +20.15,53.63,26.68,20.49,35.23,9.99,27.52,51.48,34.27 +19.48,51.17,25.92,20.86,32.43,9.74,28.39,50.65,32.96 +20.08,50.24,26.01,22.26,36.65,9.72,29.18,50.07,34.21 +20.155,52.245000000000005,26.235,21.955,35.92,9.775,29.494999999999997,50.03,34.39 +20.23,54.25,26.46,21.65,35.19,9.83,29.81,49.99,34.57 +21.26,57.67,26.97,24.23,35.12,10.01,32.37,51.13,37.12 +20.78,56.45,27.24,24.74,33.95,10.02,33.31,50.34,36.45 +20.35,53.25,27.11,23.51,32.9,10.44,32.05,49.78,35 +19.2,50.96,25.57,22.28,32.13,10.24,30.66,48.78,34.32 
+18.44,50.07,25.39,21.14,30.76,9.92,30.15,47.8,33.85 +17.65,53.13,25.69,22.34,30.36,10.29,31.86,48.39,34.84 +17.63,53.78,24.82,21.74,31.72,10.36,34.75,47.15,33.99 +18.52,53.17,25.49,23.96,33.45,10.59,35.29,48.33,35.81 +21.83,57.77,26.1,24.07,35.71,14.14,35.09,51.43,35.1 +22.66,56.44,26.17,21.37,38.07,14.55,33.12,49.47,34.16 +23.045,57.215,27.015,21.48,37.905,13.965,33.06,49.685,34.55 +23.43,57.99,27.86,21.59,37.74,13.38,33,49.9,34.94 +22.34,55.09,26.72,20.54,37.14,13.64,30.2,49.24,33.42 +21.25,56.4,26.66,19.36,34.45,11.62,28.04,47.82,32.04 +20.62,57.62,25.83,18.13,33.8,11.62,27.61,46.26,32.67 +20.04,56.65,25.6,18.3,33.58,13.61,27.13,46.22,33 +20.16,54.23,25.16,19.04,33.72,13.5,27.43,44.7,32.91 +19.31,55.18,25.04,21.36,32.91,13.04,31.86,44.34,34.68 +20.25,54.69,25.26,21.75,36.28,12.57,31.73,45.78,34.71 +19.65,58.62,25.52,24.34,34.86,12.68,32.16,40.91,37.75 +20.33,57.84,25.98,24.33,34.69,12.62,30.95,44.6,37.71 +20.3,59.68,25.94,22.18,33.89,12.2,29.65,43.61,35.74 +20.36,62.73,27.32,22.42,33.24,12.48,30.83,43.84,36.3 +20.06,63.14,26.06,21.83,33.71,12.55,29.72,43.26,35.61 +20.59,60.83,24.87,21.69,34.34,12.42,29.24,44.34,36.96 +20.3,56,24.98,22.72,33.89,13.51,29.87,45.99,38.05 +19.71,59.29,23.99,21.88,33.74,13.19,29.67,45.71,39.1 +20.07,60.23,23.03,22.16,33.58,13.07,29.89,46.46,40.44 +19.31,58.69,22.51,19.78,32.33,12.46,27.2,44.58,38.43 +18.49,55.53,21.59,18.82,31.81,11.61,25.8,44.32,36.97 +19.049999999999997,55.93,21.29,19.55,33.82,11.78,26.59,44.89,37.205 +19.61,56.33,20.99,20.28,35.83,11.95,27.38,45.46,37.44 +19.54,55.97,20.91,20.14,35.23,11.6,27.31,44.8,37.78 +18.62,55.75,20.78,19.46,33.72,11.93,26.39,42.52,36.02 +19.26,54,20.61,17.41,35.04,11.6,25.94,41.99,34.56 +19.4,58.67,21.35,17.9,34.22,11.15,25.57,41.21,34.54 +18.77,56.27,21.35,17.1,33.38,10.2,24.4,38.13,33.52 +17.4,54.93,21.1,17.19,31.82,8.97,23.23,37.54,33.69 +16.65,56.46,21.03,17.42,31.26,10.19,24.51,34.71,33.61 +15.91,55.85,20.93,16.54,30.52,10.25,23.77,34.24,32.7 
+16.38,54.92,20.66,16.63,31.55,10.38,24.14,34.79,34.32 +16.76,53.05,20.68,17.16,31.35,10.29,23.5,34.99,34.15 +16.66,48.92,20.92,18.13,30.6,11.01,23.67,36.94,35.42 +16.9,47.09,20.85,18.24,30.67,10.77,23.26,36.15,35.04 +18.43,48.96,20.81,19.15,31.14,10.95,24.71,39.11,36.19 +17.58,48.99,21.34,19.87,31.53,10.96,24.44,40.53,38.13 +18.97,50.92,21.89,21.44,31.11,12.46,25.43,41.25,39.09 +20.01,49.17,21.95,20.57,33.07,14.09,24.99,41.88,38.28 +19.07,49.59,22.44,19.92,32.52,13.53,24.12,40.3,37.59 +18.9,54.56,23.07,21.14,31.72,14.23,24.71,41,40.17 +19.14,56.93,23.67,20.99,31.63,13.9,24.15,41.56,40.3 +20.83,59.85,23.38,20.84,32.18,13.99,23.71,41.42,39.41 +18.65,53.65,23.33,18.63,31.56,12.95,23.09,39.71,38.89 +17.25,53.82,22.27,19.9,28.16,13.54,23.38,38.77,38.84 +17.21,50.84,21.22,17.58,29.27,13.32,22.01,36.79,37.08 +17.26,49.32,21.2,17.57,29.64,14.05,21.25,37.07,37.1 +16.84,47.94,20.15,17.06,29.41,13.08,21.2,36.05,37.23 +16.38,47.03,20.14,18.89,28.3,12.41,22.16,36.1,38.51 +16.02,49.58,20.17,19.95,28.75,13.19,22.91,36.76,38.52 +15.83,52.84,20.56,19.16,28.76,13.33,23.49,37.5,36.93 +16.64,51.98,23.93,18.48,29.02,14.07,22.22,38.16,36.16 +16.34,53.72,22.42,19.07,28.69,13.78,22.43,37.93,35 +16.04,50.9,23.12,19.26,28.62,13.29,22.29,37.76,35.92 +16.01,49.84,22.87,18.53,27.97,13.58,21.87,46.17,35.79 +15.875,49.085,23.625,18.770000000000003,27.91,12.95,21.69,41.45,36.06 +15.74,48.33,24.38,19.01,27.85,12.32,21.51,36.73,36.33 +15.2,46.25,24.35,18.49,27.19,12,21.16,36.65,35.59 +14.79,48.1,29.65,19.94,27.78,11.57,21.36,36.74,34.37 +15,46.65,34.08,20.49,28.9,12.25,20.51,37.08,34.58 +14.23,43.04,33.85,18.76,28.12,12.14,19.63,36.54,34.45 +14.79,44.2,38.09,19.44,27.68,12.68,19.79,37.45,34.85 +15.18,44.65,34.53,18.63,27.25,12.28,19.52,37,34.52 +14.6,46.14,32.44,18.16,26.48,12.47,19.68,36.86,33.85 +14.54,43.48,31,17.47,26.83,13.09,19.17,36.54,33.57 +14.47,42,32.67,19.38,26.75,12.92,19.9,36.32,34.02 +15.39,42.06,32.39,19.44,27.73,13.23,19.45,35.74,34.16 
+15.04,41.04,30.95,19.3,27.36,13.19,19.97,35.53,33.8 +15.79,39.01,31.45,18.76,27.66,13.03,19.66,35.8,32.8 +15.87,37.98,31.84,18.89,27.97,13,19.1,35.62,31.76 +16.39,36.61,33.03,18.62,29.09,12.93,19.77,35.65,31.42 +16.42,37.46,33.89,19.23,28.2,13,20.04,35.52,32.17 +16.19,37.31,32.58,18.73,27.41,12.15,19.78,35.48,32.09 +15.95,36.68,32.31,19.14,27.45,12.44,19.67,35.33,32.63 +16.76,36.28,30.82,19.98,27.39,13.52,20.61,35.9,32.87 +17.29,35.62,30.1,18.3,27.77,13.89,19.89,34.84,32.24 +17.62,36.2,29.31,18.07,28.19,14.03,20.78,35.45,32.72 +17.15,36.24,30.73,19.74,28.66,13.57,21.17,35.13,32.53 +17.53,36.88,31.51,20.98,28.33,13.76,21.74,36,32.73 +17.91,38.13,31.01,20.87,28.76,13.59,22.54,36.38,32.67 +15.84,35.09,30.68,18.21,26.7,11.77,20.5,33.96,30.49 +15.94,34.86,29.07,17.63,26.1,12.4,21.88,33.14,30.61 +15.63,33.74,29.11,19.29,26.61,12.31,20.68,31.91,30.76 +15.93,34.33,27.45,18.32,25.94,11.84,20.26,32.21,29.63 +16.21,32.63,27.09,17.41,26.52,12.39,19.43,30.68,29.5 +15.48,29.95,27.21,15.99,25.99,12.11,18.23,30.31,28.84 +15.81,30.63,27.7,17.51,26.47,12.36,18.85,29.71,29.41 +15.88,34.82,28.94,17.52,27.7,13.48,20.03,31.1,29.76 +15.25,33.7,28.68,16.79,26.82,13.26,19.48,30.59,29.37 +14.62,31.61,28.08,16.63,26.07,12.79,18.7,30.8,29.08 +14.19,31.29,29.71,15.87,25.35,13.29,18.64,30.1,28.99 +14.705,32.974999999999994,30.495,17.5,26.17,13.62,19.415,30.96,30.275 +15.22,34.66,31.28,19.13,26.99,13.95,20.19,31.82,31.56 +14.08,35.29,30.76,18.47,26.31,13.35,20.18,31.79,31.56 +14.51,34.2,31.78,18.97,26.41,13.32,19.9,31.33,31.94 +13.99,33.74,30.58,18.71,24.7,12.88,19.17,30.85,32.07 +14.88,35.3,31.18,19.48,27.25,13.71,19,31.84,32.52 +14.86,35.74,30.57,18.82,26.3,13.56,18.73,31.71,31.66 +15.15,37.4,29.87,18.62,26.63,13.42,19.37,31.7,31.32 +16.01,38.42,31.37,20.2,27.92,13.98,20.22,32,31.56 +15.12,33.54,31.48,19.73,26.98,14.22,17.86,31.54,31.62 +15.14,34.04,32.29,20.56,27.67,14.57,18.69,32.41,31.65 +14.83,33.01,32.37,20.21,26.53,14.44,18.28,31.15,31.34 
+14.37,31.79,30.42,18.08,26.62,14.84,17.02,30.21,30.29 +14.3,31.76,29.59,17.88,26.15,14.3,16.62,30.09,30.63 +13.99,31.51,30.1,18.68,25.92,14.84,16.98,30.16,31.28 +14.34,32.55,31.12,20.11,26.48,15.05,17.6,31.35,32.01 +14.36,32.39,31.99,20.31,26.14,14.39,17.11,32.05,31.32 +13.58,32.83,31.13,18.9,25.1,12.93,17.09,30.61,30.33 +13.53,31.59,30.24,18.21,24.76,12.78,16.62,30.02,30.01 +13.26,30.4,29.73,17.52,24.99,13.72,15.96,29.76,29.77 +13.85,30.06,29.12,17.05,24.33,12.81,15.11,30.03,29.17 +13.89,29.69,28.13,16.21,24.8,12.85,14.67,29.54,28.15 +13.83,29.6,27.8,17.9,24.91,12.32,15.19,29.29,27.63 +13.12,29.26,27.58,18.1,23.49,12.39,16.46,28.86,29.14 +13.22,29.01,29.36,18.94,22.87,13.05,16.99,29.01,29.22 diff --git a/python/src/bvhar/model/__init__.py b/python/src/bvhar/model/__init__.py new file mode 100644 index 00000000..2413c2f3 --- /dev/null +++ b/python/src/bvhar/model/__init__.py @@ -0,0 +1,20 @@ +from ._ols import VarOls, VharOls +from ._bayes import VarBayes, VharBayes +from ._spec import LdltConfig, SvConfig, InterceptConfig +from ._spec import SsvsConfig, HorseshoeConfig, MinnesotaConfig, LambdaConfig, DlConfig, NgConfig + +__all__ = [ + "VarOls", + "VharOls", + "VarBayes", + "VharBayes", + "SsvsConfig", + "HorseshoeConfig", + "MinnesotaConfig", + "LambdaConfig", + "NgConfig", + "DlConfig", + "LdltConfig", + "SvConfig", + "InterceptConfig" +] \ No newline at end of file diff --git a/python/src/bvhar/model/_bayes.py b/python/src/bvhar/model/_bayes.py new file mode 100644 index 00000000..455782c6 --- /dev/null +++ b/python/src/bvhar/model/_bayes.py @@ -0,0 +1,804 @@ +from ..utils._misc import make_fortran_array, check_np, build_grpmat, process_record, concat_chain, concat_params, process_dens_forecast +from ..utils.checkomp import get_maxomp +from .._src._design import build_response, build_design +from .._src._ldlt import McmcLdlt +from .._src._ldltforecast import LdltForecast, LdltVarRoll, LdltVharRoll, LdltVarExpand, LdltVharExpand +from .._src._sv import SvMcmc 
+from .._src._svforecast import SvForecast, SvVarRoll, SvVharRoll, SvVarExpand, SvVharExpand +from ._spec import LdltConfig, SvConfig, InterceptConfig +from ._spec import _BayesConfig, SsvsConfig, HorseshoeConfig, MinnesotaConfig, DlConfig, NgConfig +import numpy as np +import pandas as pd +import warnings +from math import floor + +class _AutoregBayes: + """Base class for Bayesian estimation""" + def __init__( + self, data, lag, p, n_chain = 1, n_iter = 1000, + n_burn = None, n_thin = 1, + bayes_config = SsvsConfig(), + cov_config = LdltConfig(), + intercept_config = InterceptConfig(), fit_intercept = True, + minnesota = "longrun" + ): + self.y_ = check_np(data) + self.n_features_in_ = self.y_.shape[1] + self.p_ = p # 3 in VHAR + self.lag_ = lag # month in VHAR + if self.y_.shape[0] <= self.lag_: + raise ValueError(f"'data' rows must be larger than 'lag' = {self.lag_}") + # self.design_ = build_design(self.y, self.lag_, fit_intercept) + # self.response_ = build_response(self.y, self.lag_, self.lag_ + 1) + self.chains_ = int(n_chain) + # self.thread_ = n_thread + self.iter_ = int(n_iter) + if n_burn is None: + n_burn = floor(n_iter / 2) + self.burn_ = int(n_burn) + self.thin_ = int(n_thin) + self.fit_intercept = fit_intercept + self.group_ = build_grpmat(self.p_, self.n_features_in_, minnesota) + self._group_id = pd.unique(self.group_.flatten(order='F')).astype(np.int32) + self._own_id = None + self._cross_id = None + n_grp = len(self._group_id) + n_alpha = self.n_features_in_ * self.n_features_in_ * self.p_ + n_design = self.p_ * self.n_features_in_ + 1 if self.fit_intercept else self.p_ * self.n_features_in_ + n_eta = int(self.n_features_in_ * (self.n_features_in_ - 1) / 2) + self.cov_spec_ = cov_config + self.spec_ = bayes_config + self.intercept_spec_ = intercept_config + self.init_ = [ + { + 'init_coef': np.random.uniform(-1, 1, (n_design, self.n_features_in_)), + 'init_contem': np.exp(np.random.uniform(-1, 0, n_eta)) + } + for _ in range(self.chains_) + ] + 
if type(self.cov_spec_) == LdltConfig: + for init in self.init_: + init.update({ + 'init_diag': np.exp(np.random.uniform(-1, 1, self.n_features_in_)) + }) + elif type(self.cov_spec_) == SvConfig: + for init in self.init_: + init.update({ + 'lvol_init': np.random.uniform(-1, 1, self.n_features_in_), + 'lvol': np.exp(np.random.uniform(-1, 1, self.n_features_in_ * n_design)).reshape(self.n_features_in_, -1).T, + 'lvol_sig': [np.exp(np.random.uniform(-1, 1))] + }) + if type(self.spec_) == SsvsConfig: + for init in self.init_: + coef_mixture = np.random.uniform(-1, 1, n_grp) + coef_mixture = np.exp(coef_mixture) / (1 + np.exp(coef_mixture)) + init_coef_dummy = np.random.binomial(1, 0.5, n_alpha) + chol_mixture = np.random.uniform(-1, 1, n_eta) + chol_mixture = np.exp(chol_mixture) / (1 + np.exp(chol_mixture)) + init_coef_slab = np.exp(np.random.uniform(-1, 1, n_alpha)) + init_contem_slab = np.exp(np.random.uniform(-1, 1, n_eta)) + init.update({ + 'init_coef_dummy': init_coef_dummy, + 'coef_mixture': coef_mixture, + 'coef_slab': init_coef_slab, + 'chol_mixture': chol_mixture, + 'contem_slab': init_contem_slab + }) + elif type(self.spec_) == HorseshoeConfig: + for init in self.init_: + local_sparsity = np.exp(np.random.uniform(-1, 1, n_alpha)) + global_sparsity = np.exp(np.random.uniform(-1, 1)) + group_sparsity = np.exp(np.random.uniform(-1, 1, n_grp)) + contem_local_sparsity = np.exp(np.random.uniform(-1, 1, n_eta)) + contem_global_sparsity = np.exp(np.random.uniform(-1, 1)) + init.update({ + 'local_sparsity': local_sparsity, + 'global_sparsity': global_sparsity, + 'group_sparsity': group_sparsity, + 'contem_local_sparsity': contem_local_sparsity, + 'contem_global_sparsity': np.array([contem_global_sparsity]) # used as VectorXd in C++ + }) + elif type(self.spec_) == MinnesotaConfig: + for init in self.init_: + init.update({ + 'own_lambda': np.random.uniform(0, 1), + 'cross_lambda': np.random.uniform(0, 1), + 'contem_lambda': np.random.uniform(0, 1) + }) + elif 
type(self.spec_) == DlConfig: + for init in self.init_: + local_sparsity = np.exp(np.random.uniform(-1, 1, n_alpha)) + global_sparsity = np.exp(np.random.uniform(-1, 1)) + contem_local_sparsity = np.exp(np.random.uniform(-1, 1, n_eta)) + contem_global_sparsity = np.exp(np.random.uniform(-1, 1)) + init.update({ + 'local_sparsity': local_sparsity, + 'global_sparsity': global_sparsity, + 'contem_local_sparsity': contem_local_sparsity, + 'contem_global_sparsity': np.array([contem_global_sparsity]) # used as VectorXd in C++ + }) + elif type(self.spec_) == NgConfig: + for init in self.init_: + local_sparsity = np.exp(np.random.uniform(-1, 1, n_alpha)) + global_sparsity = np.exp(np.random.uniform(-1, 1)) + group_sparsity = np.exp(np.random.uniform(-1, 1, n_grp)) + contem_local_sparsity = np.exp(np.random.uniform(-1, 1, n_eta)) + contem_global_sparsity = np.exp(np.random.uniform(-1, 1)) + local_shape = np.random.uniform(0, 1, n_grp) + contem_shape = np.random.uniform(0, 1) + init.update({ + 'local_shape': local_shape, + 'contem_shape': contem_shape, + 'local_sparsity': local_sparsity, + 'global_sparsity': global_sparsity, + 'group_sparsity': group_sparsity, + 'contem_local_sparsity': contem_local_sparsity, + 'contem_global_sparsity': np.array([contem_global_sparsity]) # used as VectorXd in C++ + }) + self.init_ = make_fortran_array(self.init_) + self._prior_type = { + "Minnesota": 1, + "SSVS": 2, + "Horseshoe": 3, + "HMN": 4, + "NG": 5, + "DL": 6 + }.get(self.spec_.prior) + self.is_fitted_ = False + self.coef_ = None + self.intercept_ = None + self.param_names_ = None + self.param_ = None + # self.cov_ = None + + def _validate(self): + if not isinstance(self.cov_spec_, LdltConfig): + raise TypeError("`cov_config` should be `LdltConfig` or `SvConfig`.") + if not isinstance(self.intercept_spec_, InterceptConfig): + raise TypeError("`intercept_config` should be `InterceptConfig` when 'fit_intercept' is True.") + if not isinstance(self.spec_, _BayesConfig): + raise 
TypeError("`bayes_spec` should be the derived class of `_BayesConfig`.") + self.cov_spec_.update(self.n_features_in_) + self.intercept_spec_.update(self.n_features_in_) + if type(self.spec_) == SsvsConfig: + self.spec_.update(self._group_id, self._own_id, self._cross_id) + elif type(self.spec_) == HorseshoeConfig: + pass + elif type(self.spec_) == MinnesotaConfig: + self.spec_.update(self.y_, self.p_, self.n_features_in_) + elif type(self.spec_) == DlConfig: + pass + elif type(self.spec_) == NgConfig: + pass + + def fit(self): + pass + + def predict(self): + pass + + def roll_forecast(self): + pass + + def expand_forecast(self): + pass + + def spillover(self): + pass + + def dynamic_spillover(self): + pass + +class VarBayes(_AutoregBayes): + """Bayesian Vector Autoregressive Model + + Fits Bayesian VAR model. + + Parameters + ---------- + data : array-like + Time series data of which columns indicate the variables + lag : int + VAR lag, by default 1 + n_chain : int + Number of MCMC chains, by default 1 + n_iter : int + Number of MCMC total iterations, by default 1000 + n_burn : int + MCMC burn-in (warm-up), by default `floor(n_iter / 2)` + n_thin : int + Thinning every `n_thin`-th iteration, by default 1 + bayes_config : '_BayesConfig' + Prior configuration, by default SsvsConfig() + cov_config : {'LdltConfig', 'SvConfig'} + Prior configuration for covariance matrix, by default LdltConfig() + intercept_config : 'InterceptConfig' + Prior configuration for constant term, by default InterceptConfig() + fit_intercept : bool + Include constant term in the model, by default True + minnesota : bool + If `True`, apply Minnesota-type group structure, by default True + verbose : bool + If `True`, print progress bar for MCMC, by default False + n_thread : int + Number of OpenMP threads, by default 1 + + Attributes + ---------- + coef_ : ndarray + VHAR coefficient matrix. + intercept_ : ndarray + VHAR model constant vector. + n_features_in_ : int + Number of variables. 
+ + References + ---------- + .. [1] Carriero, A., Chan, J., Clark, T. E., & Marcellino, M. (2022). *Corrigendum to “Large Bayesian vector autoregressions with stochastic volatility and non-conjugate priors” [J. Econometrics 212 (1)(2019) 137–154]*. Journal of Econometrics, 227(2), 506-512. + .. [2] Chan, J., Koop, G., Poirier, D., & Tobias, J. (2019). *Bayesian Econometric Methods (2nd ed., Econometric Exercises)*. Cambridge: Cambridge University Press. + .. [3] Cogley, T., & Sargent, T. J. (2005). *Drifts and volatilities: monetary policies and outcomes in the post WWII US*. Review of Economic Dynamics, 8(2), 262–302. + .. [4] Gruber, L., & Kastner, G. (2022). *Forecasting macroeconomic data with Bayesian VARs: Sparse or dense? It depends!* arXiv. + .. [5] Huber, F., Koop, G., & Onorante, L. (2021). *Inducing Sparsity and Shrinkage in Time-Varying Parameter Models*. Journal of Business & Economic Statistics, 39(3), 669–683. + .. [6] Korobilis, D., & Shimizu, K. (2022). *Bayesian Approaches to Shrinkage and Sparse Estimation*. Foundations and Trends® in Econometrics, 11(4), 230–354. + .. [7] Ray, P., & Bhattacharya, A. (2018). *Signal Adaptive Variable Selector for the Horseshoe Prior*. arXiv. 
+ """ + def __init__( + self, + data, + lag = 1, + n_chain = 1, + n_iter = 1000, + n_burn = None, + n_thin = 1, + bayes_config = SsvsConfig(), + cov_config = LdltConfig(), + intercept_config = InterceptConfig(), + fit_intercept = True, + minnesota = True, + verbose = False, + n_thread = 1 + ): + super().__init__(data, lag, lag, n_chain, n_iter, n_burn, n_thin, bayes_config, cov_config, intercept_config, fit_intercept, "short" if minnesota else "no") + self.design_ = build_design(self.y_, lag, fit_intercept) + self.response_ = build_response(self.y_, lag, lag + 1) + if minnesota: + self._own_id = np.array([2], dtype=np.int32) + self._cross_id = np.arange(1, self.p_ + 2, dtype=np.int32) + self._cross_id = np.delete(self._cross_id, 1) + else: + self._own_id = np.array([2], dtype=np.int32) + self._cross_id = np.array([2], dtype=np.int32) + self._validate() + self.thread_ = n_thread + if self.thread_ > get_maxomp(): + warnings.warn(f"'n_thread' = {self.thread_} is greather than 'omp_get_max_threads()' = {get_maxomp()}. Check with utils.checkomp.get_maxomp(). Check OpenMP support of your machine with utils.checkomp.is_omp().") + if self.thread_ > n_chain and n_chain != 1: + warnings.warn(f"'n_thread = {self.thread_} > 'n_chain' = {n_chain}' will not use every thread. 
Specify as 'n_thread <= 'n_chain'.") + if type(self.cov_spec_) == LdltConfig: + self.__model = McmcLdlt( + self.chains_, self.iter_, self.burn_, self.thin_, + self.design_, self.response_, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, int(self._prior_type), + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + verbose, self.thread_ + ) + else: + self.__model = SvMcmc( + self.chains_, self.iter_, self.burn_, self.thin_, + self.design_, self.response_, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, int(self._prior_type), + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + verbose, self.thread_ + ) + + def fit(self): + """Conduct MCMC and compute posterior mean + Returns + ------- + self : object + An instance of the estimator. + """ + res = self.__model.returnRecords() + self.param_names_ = process_record(res) + self.param_ = concat_chain(res) + self.coef_ = self.param_.filter(regex='^alpha\\[[0-9]+\\]').mean().to_numpy().reshape(self.n_features_in_, -1).T + if self.fit_intercept: + self.intercept_ = self.param_.filter(regex='^c\\[[0-9]+\\]').mean().to_numpy().reshape(self.n_features_in_, -1).T + self.coef_ = np.concatenate([self.coef_, self.intercept_], axis=0) + self.intercept_ = self.intercept_.reshape(self.n_features_in_,) + self.is_fitted_ = True + + def predict(self, n_ahead: int, level = .05, sparse = False, sv = True): + """'n_ahead'-step ahead forecasting + + Parameters + ---------- + n_ahead : int + Forecast until next `n_ahead` time point. 
+ level : float + Level for credible interval, by default .05 + sparse : bool + Apply restriction to forecasting, by default False + sv : bool + Use SV term in case of SV model, by default True + + Returns + ------- + dict + Density forecasting results + - "forecast" (ndarray): Posterior mean of forecasting + - "se" (ndarray): Standard error of forecasting + - "lower" (ndarray): Lower quantile of forecasting + - "upper" (ndarray): Upper quantile of forecasting + """ + fit_record = concat_params(self.param_, self.param_names_) + if type(self.cov_spec_) == LdltConfig: + forecaster = LdltForecast( + self.chains_, self.p_, n_ahead, self.response_, sparse, fit_record, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.fit_intercept, self.thread_ + ) + else: + forecaster = SvForecast( + self.chains_, self.p_, n_ahead, self.response_, sv, sparse, fit_record, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.fit_intercept, self.thread_ + ) + y_distn = forecaster.returnForecast() + y_distn = process_dens_forecast(y_distn, self.n_features_in_) + return { + "forecast": np.mean(y_distn, axis=0), + "se": np.std(y_distn, axis=0, ddof=1), + "lower": np.quantile(y_distn, level / 2, axis=0), + "upper": np.quantile(y_distn, 1 - level / 2, axis=0) + } + + def roll_forecast(self, n_ahead: int, test, level = .05, sparse = False, sv = True): + """Rolling-window forecasting + + Parameters + ---------- + n_ahead : int + Forecast next `n_ahead` time point. 
+ test : array-like + Test set to forecast + level : float + Level for credible interval, by default .05 + sparse : bool + Apply restriction to forecasting, by default False + sv : bool + Use SV term in case of SV model, by default True + + Returns + ------- + dict + Density forecasting results + - "forecast" (ndarray): Posterior mean of forecasting + - "se" (ndarray): Standard error of forecasting + - "lower" (ndarray): Lower quantile of forecasting + - "upper" (ndarray): Upper quantile of forecasting + - "lpl" (float): Average log-predictive likelihood + """ + fit_record = concat_params(self.param_, self.param_names_) + test = check_np(test) + n_horizon = test.shape[0] - n_ahead + 1 + chunk_size = n_horizon * self.chains_ // self.thread_ + # Check threads and chunk size + if type(self.cov_spec_) == LdltConfig: + forecaster = LdltVarRoll( + self.y_, self.p_, self.chains_, self.iter_, self.burn_, self.thin_, + sparse, fit_record, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, self._prior_type, + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, n_ahead, test, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_ * n_horizon).reshape(self.chains_, -1).T, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.thread_, chunk_size + ) + else: + forecaster = SvVarRoll( + self.y_, self.p_, self.chains_, self.iter_, self.burn_, self.thin_, + sv, sparse, fit_record, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, self._prior_type, + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, n_ahead, test, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_ * n_horizon).reshape(self.chains_, -1).T, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.thread_, chunk_size + ) + out_forecast = 
forecaster.returnForecast() + y_distn = list(map(lambda x: process_dens_forecast(x, self.n_features_in_), out_forecast.get('forecast'))) + return { + "forecast": np.concatenate(list(map(lambda x: np.mean(x, axis = 0), y_distn)), axis = 0), + "se": np.concatenate(list(map(lambda x: np.std(x, axis = 0, ddof=1), y_distn)), axis = 0), + "lower": np.concatenate(list(map(lambda x: np.quantile(x, level / 2, axis = 0), y_distn)), axis = 0), + "upper": np.concatenate(list(map(lambda x: np.quantile(x, 1 - level / 2, axis = 0), y_distn)), axis = 0), + "lpl": out_forecast.get('lpl') + } + + def expand_forecast(self, n_ahead: int, test, level = .05, sparse = False, sv = True): + """Expanding-window forecasting + + Parameters + ---------- + n_ahead : int + Forecast next `n_ahead` time point. + test : array-like + Test set to forecast + level : float + Level for credible interval, by default .05 + sparse : bool + Apply restriction to forecasting, by default False + sv : bool + Use SV term in case of SV model, by default True + + Returns + ------- + dict + Density forecasting results + - "forecast" (ndarray): Posterior mean of forecasting + - "se" (ndarray): Standard error of forecasting + - "lower" (ndarray): Lower quantile of forecasting + - "upper" (ndarray): Upper quantile of forecasting + - "lpl" (float): Average log-predictive likelihood + """ + fit_record = concat_params(self.param_, self.param_names_) + test = check_np(test) + n_horizon = test.shape[0] - n_ahead + 1 + chunk_size = n_horizon * self.chains_ // self.thread_ + # Check threads and chunk size + if type(self.cov_spec_) == LdltConfig: + forecaster = LdltVarExpand( + self.y_, self.p_, self.chains_, self.iter_, self.burn_, self.thin_, + sparse, fit_record, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, self._prior_type, + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, n_ahead, test, + np.random.randint(low = 1, high = 
np.iinfo(np.int32).max, size = self.chains_ * n_horizon).reshape(self.chains_, -1).T, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.thread_, chunk_size + ) + else: + forecaster = SvVarExpand( + self.y_, self.p_, self.chains_, self.iter_, self.burn_, self.thin_, + sv, sparse, fit_record, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, self._prior_type, + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, n_ahead, test, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_ * n_horizon).reshape(self.chains_, -1).T, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.thread_, chunk_size + ) + out_forecast = forecaster.returnForecast() + y_distn = list(map(lambda x: process_dens_forecast(x, self.n_features_in_), out_forecast.get('forecast'))) + return { + "forecast": np.concatenate(list(map(lambda x: np.mean(x, axis = 0), y_distn)), axis = 0), + "se": np.concatenate(list(map(lambda x: np.std(x, axis = 0, ddof=1), y_distn)), axis = 0), + "lower": np.concatenate(list(map(lambda x: np.quantile(x, level / 2, axis = 0), y_distn)), axis = 0), + "upper": np.concatenate(list(map(lambda x: np.quantile(x, 1 - level / 2, axis = 0), y_distn)), axis = 0), + "lpl": out_forecast.get('lpl') + } + + def spillover(self): + pass + + def dynamic_spillover(self): + pass + +class VharBayes(_AutoregBayes): + """Bayesian Vector Heterogeneous Autoregressive Model + + Fits Bayesian VHAR model. 
+ + Parameters + ---------- + data : array-like + Time series data of which columns indicate the variables + week : int + VHAR weekly order, by default 5 + month : int + VHAR monthly order, by default 22 + n_chain : int + Number of MCMC chains, by default 1 + n_iter : int + Number of MCMC total iterations, by default 1000 + n_burn : int + MCMC burn-in (warm-up), by default `floor(n_iter / 2)` + n_thin : int + Thinning every `n_thin`-th iteration, by default 1 + bayes_config : '_BayesConfig' + Prior configuration, by default SsvsConfig() + cov_config : {'LdltConfig', 'SvConfig'} + Prior configuration for covariance matrix, by default LdltConfig() + intercept_config : 'InterceptConfig' + Prior configuration for constant term, by default InterceptConfig() + fit_intercept : bool + Include constant term in the model, by default True + minnesota : str + Minnesota-type group structure + - "no": Not use the group structure + - "short": BVAR-minnesota structure + - "longrun": BVHAR-minnesota structure + verbose : bool + If `True`, print progress bar for MCMC, by default False + n_thread : int + Number of OpenMP threads, by default 1 + + Attributes + ---------- + coef_ : ndarray + VHAR coefficient matrix. + + intercept_ : ndarray + VHAR model constant vector. + + n_features_in_ : int + Number of variables. 
+ """ + def __init__( + self, + data, + week = 5, + month = 22, + n_chain = 1, + n_iter = 1000, + n_burn = None, + n_thin = 1, + bayes_config = SsvsConfig(), + cov_config = LdltConfig(), + intercept_config = InterceptConfig(), + fit_intercept = True, + minnesota = "longrun", + verbose = False, + n_thread = 1 + ): + super().__init__(data, month, 3, n_chain, n_iter, n_burn, n_thin, bayes_config, cov_config, intercept_config, fit_intercept, minnesota) + self.design_ = build_design(self.y_, week, month, fit_intercept) + self.response_ = build_response(self.y_, month, month + 1) + self.week_ = week + self.month_ = month + if minnesota == "longrun": + self._own_id = np.array([2, 4, 6], dtype=np.int32) + self._cross_id = np.array([1, 3, 5], dtype=np.int32) + elif minnesota == "short": + self._own_id = np.array([2], dtype=np.int32) + self._cross_id = np.array([1, 3, 4], dtype=np.int32) + else: + self._own_id = np.array([1], dtype=np.int32) + self._cross_id = np.array([2], dtype=np.int32) + self._validate() + self.thread_ = n_thread + if self.thread_ > get_maxomp(): + warnings.warn(f"'n_thread' = {self.thread_} is greater than 'omp_get_max_threads()' = {get_maxomp()}. Check with utils.checkomp.get_maxomp(). Check OpenMP support of your machine with utils.checkomp.is_omp().") + if self.thread_ > n_chain and n_chain != 1: + warnings.warn(f"'n_thread' = {self.thread_} > 'n_chain' = {n_chain} will not use every thread. 
Specify as 'n_thread <= 'n_chain'.") + if type(self.cov_spec_) == LdltConfig: + self.__model = McmcLdlt( + self.chains_, self.iter_, self.burn_, self.thin_, + self.design_, self.response_, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, int(self._prior_type), + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + verbose, self.thread_ + ) + else: + self.__model = SvMcmc( + self.chains_, self.iter_, self.burn_, self.thin_, + self.design_, self.response_, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, int(self._prior_type), + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + verbose, self.thread_ + ) + + def fit(self): + """Conduct MCMC and compute posterior mean + Returns + ------- + self : object + An instance of the estimator. + """ + res = self.__model.returnRecords() + self.param_names_ = process_record(res) + self.param_ = concat_chain(res) + self.coef_ = self.param_.filter(regex='^alpha\\[[0-9]+\\]').mean().to_numpy().reshape(self.n_features_in_, -1).T # -> change name: alpha -> phi + if self.fit_intercept: + self.intercept_ = self.param_.filter(regex='^c\\[[0-9]+\\]').mean().to_numpy().reshape(self.n_features_in_, -1).T + self.coef_ = np.concatenate([self.coef_, self.intercept_], axis=0) + self.intercept_ = self.intercept_.reshape(self.n_features_in_,) + self.is_fitted_ = True + + def predict(self, n_ahead: int, level = .05, sparse = False, sv = True): + """'n_ahead'-step ahead forecasting + + Parameters + ---------- + n_ahead : int + Forecast until next `n_ahead` time point. 
+ level : float + Level for credible interval, by default .05 + sparse : bool + Apply restriction to forecasting, by default False + sv : bool + Use SV term in case of SV model, by default True + + Returns + ------- + dict + Density forecasting results + - "forecast" (ndarray): Posterior mean of forecasting + - "se" (ndarray): Standard error of forecasting + - "lower" (ndarray): Lower quantile of forecasting + - "upper" (ndarray): Upper quantile of forecasting + """ + fit_record = concat_params(self.param_, self.param_names_) + if type(self.cov_spec_) == LdltConfig: + forecaster = LdltForecast( + self.chains_, self.week_, self.month_, n_ahead, self.response_, sparse, fit_record, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.fit_intercept, self.thread_ + ) + else: + forecaster = SvForecast( + self.chains_, self.week_, self.month_, n_ahead, self.response_, sv, sparse, fit_record, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.fit_intercept, self.thread_ + ) + y_distn = forecaster.returnForecast() + y_distn = process_dens_forecast(y_distn, self.n_features_in_) + return { + "forecast": np.mean(y_distn, axis=0), + "se": np.std(y_distn, axis=0, ddof=1), + "lower": np.quantile(y_distn, level / 2, axis=0), + "upper": np.quantile(y_distn, 1 - level / 2, axis=0) + } + + def roll_forecast(self, n_ahead: int, test, level = .05, sparse = False, sv = True): + """Rolling-window forecasting + + Parameters + ---------- + n_ahead : int + Forecast next `n_ahead` time point. 
+ test : array-like + Test set to forecast + level : float + Level for credible interval, by default .05 + sparse : bool + Apply restriction to forecasting, by default False + sv : bool + Use SV term in case of SV model, by default True + + Returns + ------- + dict + Density forecasting results + - "forecast" (ndarray): Posterior mean of forecasting + - "se" (ndarray): Standard error of forecasting + - "lower" (ndarray): Lower quantile of forecasting + - "upper" (ndarray): Upper quantile of forecasting + - "lpl" (float): Average log-predictive likelihood + """ + fit_record = concat_params(self.param_, self.param_names_) + test = check_np(test) + n_horizon = test.shape[0] - n_ahead + 1 + chunk_size = n_horizon * self.chains_ // self.thread_ + # Check threads and chunk size + if type(self.cov_spec_) == LdltConfig: + forecaster = LdltVharRoll( + self.y_, self.week_, self.month_, self.chains_, self.iter_, self.burn_, self.thin_, + sparse, fit_record, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, self._prior_type, + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, n_ahead, test, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_ * n_horizon).reshape(self.chains_, -1).T, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.thread_, chunk_size + ) + else: + forecaster = SvVharRoll( + self.y_, self.week_, self.month_, self.chains_, self.iter_, self.burn_, self.thin_, + sv, sparse, fit_record, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, self._prior_type, + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, n_ahead, test, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_ * n_horizon).reshape(self.chains_, -1).T, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.thread_, chunk_size + 
) + out_forecast = forecaster.returnForecast() + y_distn = list(map(lambda x: process_dens_forecast(x, self.n_features_in_), out_forecast.get('forecast'))) + return { + "forecast": np.concatenate(list(map(lambda x: np.mean(x, axis = 0), y_distn)), axis = 0), + "se": np.concatenate(list(map(lambda x: np.std(x, axis = 0, ddof=1), y_distn)), axis = 0), + "lower": np.concatenate(list(map(lambda x: np.quantile(x, level / 2, axis = 0), y_distn)), axis = 0), + "upper": np.concatenate(list(map(lambda x: np.quantile(x, 1 - level / 2, axis = 0), y_distn)), axis = 0), + "lpl": out_forecast.get('lpl') + } + + def expand_forecast(self, n_ahead: int, test, level = .05, sparse = False, sv = True): + """Expanding-window forecasting + + Parameters + ---------- + n_ahead : int + Forecast next `n_ahead` time point. + test : array-like + Test set to forecast + level : float + Level for credible interval, by default .05 + sparse : bool + Apply restriction to forecasting, by default False + sv : bool + Use SV term in case of SV model, by default True + + Returns + ------- + dict + Density forecasting results + - "forecast" (ndarray): Posterior mean of forecasting + - "se" (ndarray): Standard error of forecasting + - "lower" (ndarray): Lower quantile of forecasting + - "upper" (ndarray): Upper quantile of forecasting + - "lpl" (float): Average log-predictive likelihood + """ + fit_record = concat_params(self.param_, self.param_names_) + test = check_np(test) + n_horizon = test.shape[0] - n_ahead + 1 + chunk_size = n_horizon * self.chains_ // self.thread_ + # Check threads and chunk size + if type(self.cov_spec_) == LdltConfig: + forecaster = LdltVharExpand( + self.y_, self.week_, self.month_, self.chains_, self.iter_, self.burn_, self.thin_, + sparse, fit_record, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, self._prior_type, + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, n_ahead, test, + 
np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_ * n_horizon).reshape(self.chains_, -1).T, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.thread_, chunk_size + ) + else: + forecaster = SvVharExpand( + self.y_, self.week_, self.month_, self.chains_, self.iter_, self.burn_, self.thin_, + sv, sparse, fit_record, + self.cov_spec_.to_dict(), self.spec_.to_dict(), self.intercept_spec_.to_dict(), + self.init_, self._prior_type, + self._group_id, self._own_id, self._cross_id, self.group_, + self.fit_intercept, n_ahead, test, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_ * n_horizon).reshape(self.chains_, -1).T, + np.random.randint(low = 1, high = np.iinfo(np.int32).max, size = self.chains_), + self.thread_, chunk_size + ) + out_forecast = forecaster.returnForecast() + y_distn = list(map(lambda x: process_dens_forecast(x, self.n_features_in_), out_forecast.get('forecast'))) + return { + "forecast": np.concatenate(list(map(lambda x: np.mean(x, axis = 0), y_distn)), axis = 0), + "se": np.concatenate(list(map(lambda x: np.std(x, axis = 0, ddof=1), y_distn)), axis = 0), + "lower": np.concatenate(list(map(lambda x: np.quantile(x, level / 2, axis = 0), y_distn)), axis = 0), + "upper": np.concatenate(list(map(lambda x: np.quantile(x, 1 - level / 2, axis = 0), y_distn)), axis = 0), + "lpl": out_forecast.get('lpl') + } + + def spillover(self): + pass + + def dynamic_spillover(self): + pass \ No newline at end of file diff --git a/python/src/bvhar/model/_ols.py b/python/src/bvhar/model/_ols.py new file mode 100644 index 00000000..b3468b5b --- /dev/null +++ b/python/src/bvhar/model/_ols.py @@ -0,0 +1,166 @@ +from ..utils._misc import check_np, get_var_intercept +from .._src._ols import OlsVar, OlsVhar + +class _Vectorautoreg: + """Base class for OLS""" + def __init__(self, data, lag, p, fit_intercept = True, method = "nor"): + if method not in ["nor", "chol", "qr"]: + raise 
ValueError(f"Argument ('method') '{method}' is not valid: Choose between {['nor', 'chol', 'qr']}") + if lag == p: + lag_name = "lag" + else: + lag_name = "month" + self.method = { + "nor": 1, + "chol": 2, + "qr": 3 + }.get(method, None) + self.y = check_np(data) + self.n_features_in_ = self.y.shape[1] + # if self.y.shape[0] <= lag: + # raise ValueError(f"'data' rows must be larger than '{lag_name}' = {lag}") + # self.p = lag + self.p_ = p # 3 in VHAR + self.lag_ = lag # month in VHAR + if self.y.shape[0] <= self.lag_: + raise ValueError(f"'data' rows must be larger than '{lag_name}' = {self.lag_}") + self.fit_intercept = fit_intercept + self._model = None + self.coef_ = None + self.intercept_ = None + self.cov_ = None + + def fit(self): + """Fit the model + Returns + ------- + self : object + An instance of the estimator. + """ + fit = self._model.returnOlsRes() + self.coef_ = fit.get("coefficients") + self.intercept_ = get_var_intercept(self.coef_, self.p_, self.fit_intercept) + self.cov_ = fit.get("covmat") + + def predict(self): + pass + + def roll_forecast(self): + pass + + def expand_forecast(self): + pass + + def spillover(self): + pass + + def dynamic_spillover(self): + pass + +class VarOls(_Vectorautoreg): + """OLS for Vector autoregressive model + + Fits VAR model using OLS. + + Parameters + ---------- + data : array-like + Time series data of which columns indicate the variables + lag : int + VAR lag, by default 1 + fit_intercept : bool + Include constant term in the model, by default True + method : str + Normal equation solving method + - "nor": projection matrix (default) + - "chol": LU decomposition + - "qr": QR decomposition) + + Attributes + ---------- + coef_ : ndarray + VAR coefficient matrix. + + intercept_ : ndarray + VAR model constant vector. + + cov_ : ndarray + VAR covariance matrix. + + n_features_in_ : int + Number of variables. 
+ """ + def __init__(self, data, lag = 1, fit_intercept = True, method = "nor"): + super().__init__(data, lag, lag, fit_intercept, method) + self._model = OlsVar(self.y, self.p_, self.fit_intercept, self.method) + + def predict(self): + pass + + def roll_forecast(self): + pass + + def expand_forecast(self): + pass + + def spillover(self): + pass + + def dynamic_spillover(self): + pass + +class VharOls(_Vectorautoreg): + """OLS for Vector heterogeneous autoregressive model + + Fits VHAR model using OLS. + + Parameters + ---------- + data : array-like + Time series data of which columns indicate the variables + week : int + VHAR weekly order, by default 5 + month : int + VHAR monthly order, by default 22 + fit_intercept : bool + Include constant term in the model, by default True + method : str + Normal equation solving method + - "nor": projection matrix (default) + - "chol": LU decomposition + - "qr": QR decomposition) + + Attributes + ---------- + coef_ : ndarray + VHAR coefficient matrix. + + intercept_ : ndarray + VHAR model constant vector. + + cov_ : ndarray + VHAR covariance matrix. + + n_features_in_ : int + Number of variables. 
+ """ + def __init__(self, data, week = 5, month = 22, fit_intercept = True, method = "nor"): + super().__init__(data, month, 3, fit_intercept, method) + self.week_ = week + self.month_ = self.lag_ # or self.lag_ = [week, month] + self._model = OlsVhar(self.y, week, self.lag_, self.fit_intercept, self.method) + + def predict(self): + pass + + def roll_forecast(self): + pass + + def expand_forecast(self): + pass + + def spillover(self): + pass + + def dynamic_spillover(self): + pass \ No newline at end of file diff --git a/python/src/bvhar/model/_spec.py b/python/src/bvhar/model/_spec.py new file mode 100644 index 00000000..c7280ac6 --- /dev/null +++ b/python/src/bvhar/model/_spec.py @@ -0,0 +1,319 @@ +import numpy as np +from math import sqrt + +class LdltConfig: + def __init__(self, ig_shape = 3, ig_scale = .01): + self.process = "Homoskedastic" + self.prior = "Cholesky" + self.shape = self._validate(ig_shape, "ig_shape") + self.scale = self._validate(ig_scale, "ig_scale") + + def _validate(self, value, member): + if isinstance(value, int): + return [float(value)] + elif isinstance(value, (float, np.number)): + return [value] + elif isinstance(value, (list, tuple, np.ndarray)): + if len(value) == 0: + raise ValueError(f"'{member}' cannot be empty.") + return np.array(value) + else: + raise TypeError(f"'{member}' should be a number or a numeric array.") + + def validate(self, value, member): + return self._validate(value, member) + + def update(self, n_dim: int): + if len(self.shape) == 1: + self.shape = np.repeat(self.shape, n_dim) + if len(self.scale) == 1: + self.scale = np.repeat(self.scale, n_dim) + + def to_dict(self): + return { + "shape": self.shape, + "scale": self.scale + } + +class SvConfig(LdltConfig): + def __init__(self, ig_shape = 3, ig_scale = .01, initial_mean = 1, initial_prec = .1): + super().__init__(ig_shape, ig_scale) + self.process = "SV" + self.initial_mean = self.validate(initial_mean, "initial_mean", 1) + self.initial_prec = 
self.validate(initial_prec, "initial_prec", 2) + + def validate(self, value, member, n_dim): + if isinstance(value, (int, float, np.number)): + return [value] if n_dim == 1 else value * np.identity(1) + elif isinstance(value, (list, tuple, np.ndarray)): + if len(value) == 0: + raise ValueError(f"'{member}' cannot be empty.") + value_array = np.array(value) + if value_array.ndim > 2: + raise ValueError(f"'{member}' has wrong dim = {value_array.ndim}.") + elif value_array.ndim != n_dim: + raise ValueError(f"'{member}' should be {n_dim}-dim.") + return value_array + else: + raise TypeError(f"'{member}' should be a number or a numeric array.") + + def update(self, n_dim: int): + super().update(n_dim) + if isinstance(self.initial_mean, (int, float, np.number)) or (hasattr(self.initial_mean, '__len__') and len(self.initial_mean) == 1): + self.initial_mean = np.repeat(self.initial_mean, n_dim) + if isinstance(self.initial_prec, (int, float, np.number)) or (hasattr(self.initial_prec, '__len__') and len(self.initial_prec) == 1): + self.initial_prec = self.initial_prec[0] * np.identity(n_dim) + + def to_dict(self): + return { + "shape": self.shape, + "scale": self.scale, + "initial_mean": self.initial_mean, + "initial_prec": self.initial_prec + } + +class InterceptConfig: + def __init__(self, mean = 0, sd = .1): + self.process = "Intercept" + self.prior = "Normal" + self.mean_non = self.validate(mean, "mean") + self.sd_non = self.validate(sd, "sd") + + def validate(self, value, member): + # if isinstance(value, int): + # return [float(value)] + # elif isinstance(value, (float, np.number)): + # return [value] + if isinstance(value, int): + return float(value) + if isinstance(value, (float, np.number)): + return value + elif isinstance(value, (list, tuple, np.ndarray)): + if len(value) == 0: + raise ValueError(f"'{member}' cannot be empty.") + return np.array(value) + else: + raise TypeError(f"'{member}' should be a number or a numeric array.") + + def update(self, n_dim: int): + if 
isinstance(self.mean_non, (int, float, np.number)) or (hasattr(self.mean_non, '__len__') and len(self.mean_non) == 1): + self.mean_non = np.repeat(self.mean_non, n_dim) + if not isinstance(self.sd_non, (int, float, np.number)) or (hasattr(self.sd_non, '__len__') and len(self.sd_non) > 1): + raise ValueError("'sd_non' should be a number.") + + def to_dict(self): + return { + "mean_non": self.mean_non, + "sd_non": self.sd_non + } + +class _BayesConfig: + def __init__(self, prior): + self.prior = prior + + def validate(self, value, member, n_size = None): + # if isinstance(value, int): + # return [float(value)] + # elif isinstance(value, (float, np.number)): + # return [value] + if isinstance(value, (int, float, np.number)): + return value + elif isinstance(value, (list, tuple, np.ndarray)): + if len(value) == 0: + raise ValueError(f"'{member}' cannot be empty.") + elif n_size is not None and len(value) != n_size: + raise ValueError(f"'{member}' length must be {n_size}.") + value_array = np.array(value) + if value_array.ndim > 2: + raise ValueError(f"'{member}' has wrong dim = {value_array.ndim}.") + return value_array + else: + raise TypeError(f"'{member}' should be a number or a numeric array.") + + def update(self): + pass + + def to_dict(self): + pass + +class SsvsConfig(_BayesConfig): + def __init__( + self, coef_spike_scl = .01, coef_slab_shape = .01, coef_slab_scl = .01, coef_s1 = [1, 1], coef_s2 = [1, 1], + chol_spike_scl = .01, chol_slab_shape = .01, chol_slab_scl = .01, + chol_s1 = 1, chol_s2 = 1 + ): + super().__init__("SSVS") + self.coef_spike_scl = self.validate(coef_spike_scl, "coef_spike_scl") + self.coef_slab_shape = self.validate(coef_slab_shape, "coef_slab_shape") + self.coef_slab_scl = self.validate(coef_slab_scl, "coef_slab_scl") + self.coef_s1 = self.validate(coef_s1, "coef_s1", 2) + self.coef_s2 = self.validate(coef_s2, "coef_s2", 2) + self.chol_spike_scl = self.validate(chol_spike_scl, "chol_spike_scl") + self.chol_slab_shape = 
self.validate(chol_slab_shape, "chol_slab_shape") + self.chol_slab_scl = self.validate(chol_slab_scl, "chol_slab_scl") + self.chol_s1 = self.validate(chol_s1, "chol_s1") + self.chol_s2 = self.validate(chol_s2, "chol_s2") + + def update(self, grp_id: np.array, own_id: np.array, cross_id: np.array): + if len(self.coef_s1) == 2: + coef_s1 = np.zeros(len(grp_id)) + coef_s1[np.isin(grp_id, own_id)] = self.coef_s1[0] + coef_s1[np.isin(grp_id, cross_id)] = self.coef_s1[1] + self.coef_s1 = coef_s1 + if len(self.coef_s2) == 2: + coef_s2 = np.zeros(len(grp_id)) + coef_s2[np.isin(grp_id, own_id)] = self.coef_s2[0] + coef_s2[np.isin(grp_id, cross_id)] = self.coef_s2[1] + self.coef_s2 = coef_s2 + + def to_dict(self): + return { + "coef_spike_scl": self.coef_spike_scl, + "coef_slab_shape": self.coef_slab_shape, + "coef_slab_scl": self.coef_slab_scl, + "coef_s1": self.coef_s1, + "coef_s2": self.coef_s2, + "chol_spike_scl": self.chol_spike_scl, + "chol_slab_shape": self.chol_slab_shape, + "chol_slab_scl": self.chol_slab_scl, + "chol_s1": self.chol_s1, + "chol_s2": self.chol_s2 + } + +class HorseshoeConfig(_BayesConfig): + def __init__(self): + super().__init__("Horseshoe") + + def to_dict(self): + return dict() + +class MinnesotaConfig(_BayesConfig): + def __init__(self, sig = None, lam = .1, delt = None, is_long = False, eps = 1e-04): + super().__init__("Minnesota") + self.sig = None + if sig is not None: + self.sig = self.validate(sig, "sig") + # self.lam = self.validate(lam, "lam") + self.lam = lam + if type(self.lam) == LambdaConfig: + self.lam = [lam.shape_, lam.rate_] # shape and rate + self.prior = "HMN" + self.delt = None + if delt is not None or isinstance(delt, np.ndarray): + self.delt = self.validate(delt, "delt") + self.eps = self.validate(eps, "eps") + self.p = None + if is_long: + self.weekly = None + self.monthly = None + + def update(self, y: np.array, p, n_dim: int): + self.p = p + if self.sig is None: + self.sig = np.apply_along_axis(np.std, 0, y) + if self.delt 
is None: + self.delt = np.repeat(0, n_dim) + if hasattr(self, "weekly"): + self.weekly = np.repeat(0, n_dim) + self.monthly = np.repeat(0, n_dim) + + def to_dict(self): + if hasattr(self, "weekly"): + if isinstance(self.lam, list): + return { + "shape": self.lam[0], + "rate": self.lam[1], + "p": self.p, + "sigma": self.sig, + "eps": self.eps, + "daily": self.delt, + "weekly": self.weekly, + "monthly": self.monthly + } + else: + return { + "lambda": self.lam, + "p": self.p, + "sigma": self.sig, + "eps": self.eps, + "daily": self.delt, + "weekly": self.weekly, + "monthly": self.monthly + } + if isinstance(self.lam, list): + return { + "shape": self.lam[0], + "rate": self.lam[1], + "p": self.p, + "sigma": self.sig, + "eps": self.eps, + "delta": self.delt + } + return { + "lambda": self.lam, + "p": self.p, + "sigma": self.sig, + "eps": self.eps, + "delta": self.delt + } + +class LambdaConfig: + def __init__(self, shape = .01, rate = .01, eps = 1e-04): + self.shape_ = self.validate(shape, "shape") + self.rate_ = self.validate(rate, "rate") + + def validate(self, value, member): + if isinstance(value, int): + return float(value) + if isinstance(value, (float, np.number)): + return value + elif isinstance(value, (list, tuple, np.ndarray)): + if len(value) == 0: + raise ValueError(f"'{member}' cannot be empty.") + return np.array(value) + else: + raise TypeError(f"'{member}' should be a number or a numeric array.") + + def update(self, mode, sd): + self.shape_ = (2 + mode ** 2 / (sd ** 2) + sqrt((2 + mode ** 2 / (sd ** 2)) ** 2 - 4)) / 2 + self.rate_ = sqrt(self.shape_) / sd + +class DlConfig(_BayesConfig): + def __init__(self, dir_grid: int = 100, shape = .01, rate = .01): + super().__init__("DL") + self.grid_size = self.validate(dir_grid, "dir_grid") + self.shape = self.validate(shape, "shape") + self.rate = self.validate(rate, "rate") + + def to_dict(self): + return { + "grid_size": self.grid_size, + "shape": self.shape, + "rate": self.rate + } + +class 
NgConfig(_BayesConfig): + def __init__( + self, shape_sd = .01, group_shape = .01, group_scale = .01, + global_shape = .01, global_scale = .01, + contem_global_shape = .01, contem_global_scale = .01 + ): + super().__init__("NG") + self.shape_sd = self.validate(shape_sd, "shape_sd") + self.group_shape = self.validate(group_shape, "group_shape") + self.group_scale = self.validate(group_scale, "group_scale") + self.global_shape = self.validate(global_shape, "global_shape") + self.global_scale = self.validate(global_scale, "global_scale") + self.contem_global_shape = self.validate(contem_global_shape, "contem_global_shape") + self.contem_global_scale = self.validate(contem_global_scale, "contem_global_scale") + + def to_dict(self): + return { + "shape_sd": self.shape_sd, + "group_shape": self.group_shape, + "group_scale": self.group_scale, + "global_shape": self.global_shape, + "global_scale": self.global_scale, + "contem_global_shape": self.contem_global_shape, + "contem_global_scale": self.contem_global_scale + } \ No newline at end of file diff --git a/python/src/bvhar/random/__init__.py b/python/src/bvhar/random/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/src/bvhar/random/normal.cpp b/python/src/bvhar/random/normal.cpp new file mode 100644 index 00000000..2b2f8caf --- /dev/null +++ b/python/src/bvhar/random/normal.cpp @@ -0,0 +1,55 @@ +// #include +#include +#include + +// namespace py = pybind11; + +// MVN +Eigen::MatrixXd generate_mnormal(int num_sim, Eigen::VectorXd mean, Eigen::MatrixXd covariance, unsigned int seed, int method) { + boost::random::mt19937 rng(seed); + int dim = covariance.cols(); + if (covariance.rows() != dim) { + throw py::value_error("Invalid 'covariance' dimension."); + } + if (dim != mean.size()) { + throw py::value_error("Invalid 'mean' size."); + } + Eigen::MatrixXd standard_normal(num_sim, dim); + Eigen::MatrixXd res(num_sim, dim); + for (int i = 0; i < num_sim; i++) { + for (int j = 0; j < 
standard_normal.cols(); j++) { + standard_normal(i, j) = bvhar::normal_rand(rng); + } + } + switch (method) { + case 1: { + // res = standard_normal * covariance.sqrt(); + throw py::value_error("Use eigen decomposition later"); + break; + } + case 2: { + res = standard_normal * covariance.llt().matrixU(); // use upper because now dealing with row vectors + break; + } + default: { + throw py::value_error("No 'method' defined"); + } + } + res.rowwise() += mean.transpose(); + return res; +} + +// MNIW + +// GIG +Eigen::VectorXd generate_gig(int num_sim, double lambda, double psi, double chi, unsigned int seed) { + boost::random::mt19937 rng(seed); + return bvhar::sim_gig(num_sim, lambda, psi, chi, rng); +} + +PYBIND11_MODULE(normal, m) { + m.doc() = "Random samplers related to Gaussain"; + + m.def("generate_mnormal", &generate_mnormal, "Generates multivariate gaussian random vectors"); + m.def("generate_gig", &generate_gig, "A function that GIG random variates"); +} diff --git a/python/src/bvhar/utils/__init__.py b/python/src/bvhar/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/src/bvhar/utils/_misc.py b/python/src/bvhar/utils/_misc.py new file mode 100644 index 00000000..01efef3b --- /dev/null +++ b/python/src/bvhar/utils/_misc.py @@ -0,0 +1,160 @@ +import numpy as np +import pandas as pd + +def check_numeric(data : np.array): + """Check if the array consists of numeric + + Parameters + ---------- + data : np.array + 2-dim array + + Raises + ------ + ValueError + If the array does not consists of only numeric values. 
+ """ + if not np.issubdtype(data.dtype, np.number) or np.issubdtype(data.dtype, np.bool_): + raise ValueError("All elements should be numeric.") + +# """Check if the array consists of numeric + +# :param data: 2-dim array +# :type data: boolean +# """ + +def make_fortran_array(object): + """Make the array for Eigen input + + Parameters + ---------- + object : np.array + Array to be used as a input for Eigen::Matrix + + Returns + ------- + array + Array available for Eigen::Matrix input + """ + # if not arr.flags.f_contiguous or arr.dtype != np.float64: + # return np.asfortranarray(arr, dtype=np.float64) + # return arr + if isinstance(object, np.ndarray): + if not object.flags.f_contiguous or object.dtype != np.float64: + return np.asfortranarray(object, dtype=np.float64) + elif isinstance(object, dict): + return {k: make_fortran_array(v) for k, v in object.items()} + elif isinstance(object, list): + return [make_fortran_array(item) for item in object] + return object + +def check_np(data): + """Check if the dataset is numpy array for Eigen + + Parameters + ---------- + data : None + Table-format data + + Returns + ------- + array + Result of :func:`make_fortran_array` + + Raises + ------ + ValueError + If the array.ndim is not 2. + ValueError + If the list.ndim is not 2. + ValueError + If the data is not array, list, nor pd.DataFrame. + """ + if isinstance(data, np.ndarray): + if data.ndim == 2: + check_numeric(data) + return make_fortran_array(data) + else: + raise ValueError("Numpy array must be 2-dim.") + elif isinstance(data, list): + array_data = np.array(data) + if array_data.ndim == 2: + check_numeric(array_data) + return make_fortran_array(array_data) + else: + raise ValueError("np.array(list) should give 2-dim array.") + else: + if isinstance(data, pd.DataFrame): + array_data = data.values + check_numeric(array_data) + return make_fortran_array(array_data) + # Add polars? 
+ raise ValueError("Unsupported data type.") + +def get_var_intercept(coef : np.array, lag: int, fit_intercept : bool): + dim_design, dim_data = coef.shape + if not fit_intercept: + return np.repeat(0, dim_data) + if dim_design != dim_data * lag + 1: + ValueError() + return coef[-1] + +def build_grpmat(p, dim_data, minnesota = "longrun"): + if minnesota not in ["longrun", "short", "no"]: + raise ValueError(f"Argument ('minnesota')={method} is not valid: Choose between {['longrun', 'short', 'no']}") + if minnesota == "no": + return np.full((p * dim_data, dim_data), 1) + res = [np.identity(dim_data) + 1] + for i in range(1, p): + if minnesota == "longrun": + lag_grp = np.identity(dim_data) + (i * 2 + 1) + else: + lag_grp = np.full((dim_data, dim_data), i + 2) + res.append(lag_grp) + return np.vstack(res) + +def process_record(record_list: list): + rec_names = list(record_list[0].keys()) + return [name.replace('_record', '') for name in rec_names] + +def concat_chain(record_list: list): + record_concat = pd.DataFrame() + tot_draw_n = 0 + for chain_id, chain_dict in enumerate(record_list): + param_record = pd.DataFrame() + for rec_names, record in chain_dict.items(): + param = rec_names.replace('_record', '') + n_col = record.shape[1] + chain_record = pd.DataFrame( + record, + columns=[param + f"[{i}]" for i in range(1, n_col + 1)] + ) + n_draw = len(chain_record) + param_record = pd.concat([param_record, chain_record], axis=1) + param_record['_chain'] = chain_id + param_record['_iteration'] = range(1, n_draw + 1) + param_record['_draw'] = range(tot_draw_n + 1, tot_draw_n + n_draw + 1) + tot_draw_n += n_draw + record_concat = pd.concat([record_concat, param_record], axis=0) + return record_concat + +def concat_params(record: pd.DataFrame, param_names: str): + res = {} + # n_chains = record['_chain'].nunique() + for _name in param_names: + param_columns = [col for col in record.columns if col.startswith(_name)] + param_record = record[param_columns + ['_chain']] + 
param_record_chain = [df for _, df in param_record.groupby('_chain')] + array_chain = [df.drop('_chain', axis=1).values for df in param_record_chain] + res[f"{_name}_record"] = array_chain + return res + +def process_dens_forecast(pred_list: list, n_dim: int): + # shape_pred = pred_list[0].shape # (step, dim * draw) + # n_ahead = shape_pred[0] + # n_draw = int(shape_pred[1] / n_dim) + n_draw = int(pred_list[0].shape[1] / n_dim) + res = [] + for arr in pred_list: + res.append([arr[:, range(id * n_dim, id * n_dim + n_dim)] for id in range(n_draw)]) + return np.concatenate(res, axis=0) diff --git a/python/src/bvhar/utils/checkomp.cpp b/python/src/bvhar/utils/checkomp.cpp new file mode 100644 index 00000000..0b88dacd --- /dev/null +++ b/python/src/bvhar/utils/checkomp.cpp @@ -0,0 +1,34 @@ +#include +#include + +namespace py = pybind11; + +int get_maxomp() { + return omp_get_max_threads(); +} + +bool is_omp() { +#ifdef _OPENMP + return true; +#else + return false; +#endif +} + +void check_omp() { +#ifdef _OPENMP + // std::cout << "OpenMP threads: " << omp_get_max_threads() << "\n"; + py::print("OpenMP threads: ", omp_get_max_threads()); +#else + // Rcpp::Rcout << "OpenMP not available in this machine." 
<< "\n"; + py::print("OpenMP not available in this machine."); +#endif +} + +PYBIND11_MODULE(checkomp, m) { + m.doc() = "Check OpenMP configuration"; + + m.def("get_maxomp", &get_maxomp, "Show the maximum thread numbers"); + m.def("is_omp", &is_omp, "Give boolean for OpenMP"); + m.def("check_omp", &check_omp, "Print if OpenMP is enabled"); +} \ No newline at end of file diff --git a/python/tests/__init__.py b/python/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/tests/test_bayes.py b/python/tests/test_bayes.py new file mode 100644 index 00000000..94e41eb4 --- /dev/null +++ b/python/tests/test_bayes.py @@ -0,0 +1,219 @@ +import pytest +from bvhar.model import VarBayes, VharBayes +from bvhar.model import LdltConfig, SvConfig, InterceptConfig +from bvhar.model import SsvsConfig, HorseshoeConfig, MinnesotaConfig, LambdaConfig, NgConfig, DlConfig +from bvhar.datasets import load_vix +import numpy as np + +def help_var_bayes( + dim_data, var_lag, data, + num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + bayes_config, cov_config, + test_y = None, n_ahead = None, pred = False, roll = False, expand = False, spillover = False +): + np.random.seed(1) + fit_bayes = VarBayes( + data, var_lag, + num_chains, + num_iter, + num_burn, + thin, + bayes_config, + cov_config, + InterceptConfig(), + intercept, + minnesota, + False, + num_threads + ) + fit_bayes.fit() + + assert fit_bayes.n_features_in_ == dim_data + assert fit_bayes.coef_.shape == (dim_data * var_lag + 1, dim_data) + assert fit_bayes.intercept_.shape == (dim_data,) + + if pred: + pred_out = fit_bayes.predict(n_ahead, sparse = True) + assert pred_out['forecast'].shape == (n_ahead, dim_data) + assert pred_out['se'].shape == (n_ahead, dim_data) + assert pred_out['lower'].shape == (n_ahead, dim_data) + assert pred_out['upper'].shape == (n_ahead, dim_data) + if roll: + roll_out = fit_bayes.roll_forecast(1, test_y, sparse = True) + assert roll_out['forecast'].shape 
== (n_ahead, dim_data) + assert roll_out['se'].shape == (n_ahead, dim_data) + assert roll_out['lower'].shape == (n_ahead, dim_data) + assert roll_out['upper'].shape == (n_ahead, dim_data) + if expand: + roll_out = fit_bayes.expand_forecast(1, test_y, sparse = True) + assert roll_out['forecast'].shape == (n_ahead, dim_data) + assert roll_out['se'].shape == (n_ahead, dim_data) + assert roll_out['lower'].shape == (n_ahead, dim_data) + assert roll_out['upper'].shape == (n_ahead, dim_data) + +def test_var_bayes(): + num_data = 30 + dim_data = 2 + var_lag = 3 + etf_vix = load_vix() + data = etf_vix.to_numpy()[:num_data, :dim_data] + n_ahead = 5 + data_out = etf_vix.to_numpy()[num_data:(num_data + n_ahead), :dim_data] + + num_chains = 2 + num_threads = 1 + num_iter = 5 + num_burn = 2 + thin = 1 + intercept = True + minnesota = True + + help_var_bayes( + dim_data, var_lag, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + SsvsConfig(), LdltConfig(), + data_out, n_ahead, True, True, True + ) + help_var_bayes( + dim_data, var_lag, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + HorseshoeConfig(), LdltConfig(), + data_out, n_ahead + ) + help_var_bayes( + dim_data, var_lag, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + MinnesotaConfig(lam=LambdaConfig()), LdltConfig() + ) + help_var_bayes( + dim_data, var_lag, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + NgConfig(), LdltConfig() + ) + help_var_bayes( + dim_data, var_lag, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + DlConfig(), LdltConfig() + ) + + # help_var_bayes( + # dim_data, var_lag, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + # SsvsConfig(), SvConfig() + # ) + + with pytest.warns(UserWarning, match=f"'n_thread = 3 > 'n_chain' = 2' will not use every thread. 
Specify as 'n_thread <= 'n_chain'."): + VarBayes( + data, var_lag, 2, num_iter, num_burn, thin, + SsvsConfig(), LdltConfig(), InterceptConfig(), + intercept, minnesota, False, 3 + ) + + with pytest.raises(ValueError, match=f"'data' rows must be larger than 'lag' = {var_lag}"): + etf_vix = load_vix() + data = etf_vix.iloc[:(var_lag - 1), :dim_data] + VarBayes( + data, var_lag, num_chains, num_iter, num_burn, thin, + SsvsConfig(), LdltConfig(), InterceptConfig(), + intercept, minnesota, False, num_threads + ) + +def help_vhar_bayes( + dim_data, week, month, data, + num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + bayes_config, cov_config, + test_y = None, n_ahead = None, pred = False, roll = False, expand = False, spillover = False +): + np.random.seed(1) + fit_bayes = VharBayes( + data, week, month, + num_chains, + num_iter, + num_burn, + thin, + bayes_config, + cov_config, + InterceptConfig(), + intercept, + minnesota, + False, + num_threads + ) + fit_bayes.fit() + + assert fit_bayes.n_features_in_ == dim_data + assert fit_bayes.coef_.shape == (dim_data * 3 + 1, dim_data) + assert fit_bayes.intercept_.shape == (dim_data,) + + if pred: + pred_out = fit_bayes.predict(n_ahead, sparse = True) + assert pred_out['forecast'].shape == (n_ahead, dim_data) + assert pred_out['se'].shape == (n_ahead, dim_data) + assert pred_out['lower'].shape == (n_ahead, dim_data) + assert pred_out['upper'].shape == (n_ahead, dim_data) + if roll: + roll_out = fit_bayes.roll_forecast(1, test_y, sparse = True) + assert roll_out['forecast'].shape == (n_ahead, dim_data) + assert roll_out['se'].shape == (n_ahead, dim_data) + assert roll_out['lower'].shape == (n_ahead, dim_data) + assert roll_out['upper'].shape == (n_ahead, dim_data) + if expand: + roll_out = fit_bayes.expand_forecast(1, test_y, sparse = True) + assert roll_out['forecast'].shape == (n_ahead, dim_data) + assert roll_out['se'].shape == (n_ahead, dim_data) + assert roll_out['lower'].shape == (n_ahead, 
dim_data) + assert roll_out['upper'].shape == (n_ahead, dim_data) + +def test_vhar_bayes(): + num_data = 30 + dim_data = 3 + week = 5 + month = 22 + etf_vix = load_vix() + data = etf_vix.iloc[:num_data, :dim_data] + n_ahead = 5 + data_out = etf_vix.iloc[num_data:(num_data + n_ahead), :dim_data] + + num_chains = 2 + num_threads = 1 + num_iter = 5 + num_burn = 2 + thin = 1 + intercept = True + minnesota = "longrun" + + help_vhar_bayes( + dim_data, week, month, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + SsvsConfig(), LdltConfig(), + data_out, n_ahead, True, True, True + ) + help_vhar_bayes( + dim_data, week, month, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + HorseshoeConfig(), LdltConfig() + ) + help_vhar_bayes( + dim_data, week, month, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + MinnesotaConfig(lam=LambdaConfig()), LdltConfig() + ) + help_vhar_bayes( + dim_data, week, month, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + NgConfig(), LdltConfig() + ) + help_vhar_bayes( + dim_data, week, month, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + DlConfig(), LdltConfig() + ) + + # help_vhar_bayes( + # dim_data, week, month, data, num_chains, num_threads, num_iter, num_burn, thin, intercept, minnesota, + # SsvsConfig(), SvConfig() + # ) + + with pytest.warns(UserWarning, match=f"'n_thread = 3 > 'n_chain' = {num_chains}' will not use every thread. 
import pytest
from bvhar.utils import _misc
import numpy as np
from numpy.testing import assert_array_equal

def test_build_grpmat():
    """build_grpmat reproduces the known group layouts for p=3, dim=2."""
    p, dim_data = 3, 2
    expected = {
        "longrun": [[2, 1],
                    [1, 2],
                    [4, 3],
                    [3, 4],
                    [6, 5],
                    [5, 6]],
        "short": [[2, 1],
                  [1, 2],
                  [3, 3],
                  [3, 3],
                  [4, 4],
                  [4, 4]],
        "no": [[1, 1]] * 6,
    }
    for minnesota, grp in expected.items():
        assert_array_equal(_misc.build_grpmat(p, dim_data, minnesota), np.array(grp))
"qr") + fit_var.fit() + fit_var_llt.fit() + fit_var_qr.fit() + + assert fit_var.n_features_in_ == dim_data + assert fit_var.coef_.shape == (dim_data * var_lag + 1, dim_data) + assert fit_var.intercept_.shape == (dim_data,) + + data = np.random.randn(var_lag - 1, dim_data) + with pytest.raises(ValueError, match=f"'data' rows must be larger than 'lag' = {var_lag}"): + fit_var = VarOls(data, var_lag, True, "nor") + +def test_vhar(): + num_data = 30 + dim_data = 3 + week = 5 + month = 22 + data = np.random.randn(num_data, dim_data) + + fit_vhar = VharOls(data, week, month, True, "nor") + fit_vhar_llt = VharOls(data, week, month, True, "chol") + fit_vhar_qr = VharOls(data, week, month, True, "qr") + fit_vhar.fit() + fit_vhar_llt.fit() + fit_vhar_qr.fit() + + assert fit_vhar.n_features_in_ == dim_data + assert fit_vhar.coef_.shape == (dim_data * 3 + 1, dim_data) + assert fit_vhar.intercept_.shape == (dim_data,) + + data = np.random.randn(month - 1, dim_data) + with pytest.raises(ValueError, match=f"'data' rows must be larger than 'month' = {month}"): + fit_vhar = VharOls(data, week, month, True, "nor") \ No newline at end of file diff --git a/src/Makevars b/src/Makevars index e5d4fdc7..249a633f 100644 --- a/src/Makevars +++ b/src/Makevars @@ -2,7 +2,7 @@ # Add -DBOOST_DISABLE_ASSERTS against assertion failures made by boost library # If -DEIGEN_DONT_PARALLELIZE added, multi-threading will be disabled related to OpenMP. # In this package we use the same number of threads specified in OpenMP -PKG_CXXFLAGS = -I../inst/include -DEIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS -DBOOST_DISABLE_ASSERTS $(SHLIB_OPENMP_CXXFLAGS) +PKG_CXXFLAGS = -I../inst/include -DEIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS -DBOOST_DISABLE_ASSERTS $(SHLIB_OPENMP_CXXFLAGS) -DUSE_RCPP ## With Rcpp 0.11.0 and later, we no longer need to set PKG_LIBS as there is ## no user-facing library. The include path to headers is already set by R. 
diff --git a/src/Makevars.win b/src/Makevars.win index e5d4fdc7..249a633f 100644 --- a/src/Makevars.win +++ b/src/Makevars.win @@ -2,7 +2,7 @@ # Add -DBOOST_DISABLE_ASSERTS against assertion failures made by boost library # If -DEIGEN_DONT_PARALLELIZE added, multi-threading will be disabled related to OpenMP. # In this package we use the same number of threads specified in OpenMP -PKG_CXXFLAGS = -I../inst/include -DEIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS -DBOOST_DISABLE_ASSERTS $(SHLIB_OPENMP_CXXFLAGS) +PKG_CXXFLAGS = -I../inst/include -DEIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS -DBOOST_DISABLE_ASSERTS $(SHLIB_OPENMP_CXXFLAGS) -DUSE_RCPP ## With Rcpp 0.11.0 and later, we no longer need to set PKG_LIBS as there is ## no user-facing library. The include path to headers is already set by R.