Skip to content

Commit

Permalink
Merge pull request #38 from cvxgrp/37-load-tail-of-data-for-faster-cicd
Browse files Browse the repository at this point in the history
experiments ci/cd
  • Loading branch information
phschiele authored Dec 16, 2023
2 parents ebaac23 + ee1f13b commit c5a986c
Show file tree
Hide file tree
Showing 6 changed files with 22 additions and 9 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/basic.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@ jobs:
- name: experiments
shell: bash
run: |
make experiment
make experiments
4 changes: 2 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@ fmt: install ## Run autoformatting and linting
clean: ## Clean up caches and build artifacts
@git clean -X -d -f

.PHONY: experiment
experiment: install ## Run all experiment
.PHONY: experiments
experiments: install ## Run all experiments
${VENV}/bin/python experiments.py

.PHONY: help
Expand Down
2 changes: 2 additions & 0 deletions experiments.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,11 @@

from experiments.taming import main as taming_main
from experiments.scaling_small import main as scaling_small_main
from experiments.scaling_large import main as scaling_large_main

if __name__ == "__main__":
    # Create the output directories up front so every experiment can
    # write checkpoints/figures without checking for them itself.
    for folder in ("checkpoints", "figures"):
        Path(folder).mkdir(exist_ok=True)
    # Run each experiment entry point in sequence: small scaling first,
    # then large scaling, then the "taming" backtest experiments.
    for run_experiment in (scaling_small_main, scaling_large_main, taming_main):
        run_experiment()
5 changes: 5 additions & 0 deletions experiments/backtest.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

from dataclasses import dataclass
from functools import lru_cache
import os
from pathlib import Path
import pickle
import time
Expand All @@ -21,6 +22,10 @@ def load_data() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]
prices = pd.read_csv(data_folder() / "prices.csv", index_col=0, parse_dates=True)
spread = pd.read_csv(data_folder() / "spreads.csv", index_col=0, parse_dates=True)
rf = pd.read_csv(data_folder() / "rf.csv", index_col=0, parse_dates=True).iloc[:, 0]
if os.getenv("CI"):
prices = prices.tail(2000)
spread = spread.tail(2000)
rf = rf.tail(2000)
return prices, spread, rf


Expand Down
7 changes: 5 additions & 2 deletions experiments/scaling_large.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import os
import cvxpy as cp
import numpy as np
import pandas as pd
from utils import generate_random_inputs
from experiments.utils import generate_random_inputs


def main():
Expand All @@ -11,8 +12,10 @@ def main():
for n_assets, n_factors in scenarios:
print(f"Running scenario with {n_assets} assets and {n_factors} factors")
solvers = [cp.MOSEK] if fitting else [cp.CLARABEL, cp.MOSEK]
solvers = [s for s in solvers if s in cp.installed_solvers()]
for solver in solvers:
for _ in range(1):
n_iters = 1 if os.environ.get("CI") else 30
for _ in range(n_iters):
problem = run_scaling(n_assets, n_factors, solver)
assert problem.status in {
cp.OPTIMAL,
Expand Down
11 changes: 7 additions & 4 deletions experiments/taming.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def get_basic_data_and_parameters(
return data, param


def main(from_checkpoint: bool = True) -> None:
def main(from_checkpoint: bool = False) -> None:
annualized_target = 0.10

if not from_checkpoint:
Expand All @@ -129,12 +129,15 @@ def main(from_checkpoint: bool = True) -> None:
weight_limited_result = BacktestResult.load(
f"checkpoints/weight_limited_{annualized_target}.pickle"
)

leverage_limit_result = BacktestResult.load(
f"checkpoints/leverage_limit_{annualized_target}.pickle"
)

turnover_limit_result = BacktestResult.load(
f"checkpoints/turnover_limit_{annualized_target}.pickle"
)

robust_result = BacktestResult.load(
f"checkpoints/robust_{annualized_target}.pickle"
)
Expand Down Expand Up @@ -165,9 +168,9 @@ def run_all_strategies(annualized_target: float) -> None:
adjustment_factor = np.sqrt(equal_weights_results.periods_per_year)
sigma_target = annualized_target / adjustment_factor

# print("Running basic Markowitz")
# basic_result = run_backtest(basic_markowitz, sigma_target, verbose=True)
# basic_result.save(f"checkpoints/basic_{annualized_target}.pickle")
print("Running basic Markowitz")
basic_result = run_backtest(basic_markowitz, sigma_target, verbose=True)
basic_result.save(f"checkpoints/basic_{annualized_target}.pickle")

print("Running weight-limited Markowitz")
weight_limited_result = run_backtest(
Expand Down

0 comments on commit c5a986c

Please sign in to comment.