Skip to content

Commit

Permalink
experiments ci/cd
Browse files Browse the repository at this point in the history
  • Loading branch information
Thomas Schmelzer committed Dec 10, 2023
1 parent 2b65447 commit d44e999
Show file tree
Hide file tree
Showing 5 changed files with 38 additions and 14 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/basic.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@ jobs:
- name: experiments
shell: bash
run: |
make experiment
make experiment_cicd
4 changes: 4 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@ clean: ## Clean up caches and build artifacts
experiment: install ## Run all experiment
${VENV}/bin/python experiments.py

.PHONY: experiment_cicd
# Target name must match the .PHONY declaration above and the
# `make experiment_cicd` invocation in .github/workflows/basic.yml
# (the hunk named it `experiment_short`, which CI could never find).
experiment_cicd: install ## Run the shortened experiment set for CI/CD
	${VENV}/bin/python experiments_short.py

.PHONY: help
help: ## Display this help screen
@echo -e "\033[1mAvailable commands:\033[0m"
Expand Down
18 changes: 14 additions & 4 deletions experiments/backtest.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,21 @@ def data_folder():


@lru_cache(maxsize=1)
def load_data(
    n: int | None = None,
) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.Series]:
    """Load prices, spreads, volumes and the risk-free rate from CSV files.

    Parameters
    ----------
    n : int | None
        If given, restrict the data to the last ``n`` rows (trading days).
        ``None`` (the default) keeps the full history.

    Returns
    -------
    tuple
        ``(prices, spread, volume, rf)`` — three DataFrames aligned on the
        same (possibly trimmed) date index, and ``rf`` as a Series
        (``squeeze()`` collapses the single-column frame).

    Note
    ----
    With ``maxsize=1`` only the most recent ``n`` is cached; alternating
    calls with different ``n`` values re-read the CSV files each time.
    """
    prices = pd.read_csv(data_folder() / "prices.csv", index_col=0, parse_dates=True)
    spread = pd.read_csv(data_folder() / "spreads.csv", index_col=0, parse_dates=True)
    volume = pd.read_csv(data_folder() / "volumes.csv", index_col=0, parse_dates=True)
    rf = pd.read_csv(data_folder() / "rf.csv", index_col=0, parse_dates=True).squeeze()

    # Keep only the last n days of data. Test `is None` explicitly (rather
    # than `n or ...`) so an explicit n=0 is not silently treated as
    # "keep everything".
    if n is None:
        n = prices.shape[0]
    prices = prices.tail(n)

    # Align the other inputs to the trimmed price index.
    spread = spread.loc[prices.index]
    volume = volume.loc[prices.index]
    rf = rf.loc[prices.index]

    return prices, spread, volume, rf


Expand All @@ -47,15 +57,15 @@ def n_assets(self) -> int:


def run_backtest(
strategy: Callable, risk_target: float, verbose: bool = False
strategy: Callable, risk_target: float, verbose: bool = False, n: int = None
) -> tuple[pd.Series, pd.DataFrame]:
"""
Run a simplified backtest for a given strategy.
At time t we use data from t-lookback to t to compute the optimal portfolio
weights and then execute the trades at time t.
"""

prices, spread, volume, rf = load_data()
prices, spread, volume, rf = load_data(n=n)
n_assets = prices.shape[1]

lookback = 500
Expand Down
21 changes: 12 additions & 9 deletions experiments/taming.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,11 +118,11 @@ def get_basic_data_and_parameters(
return data, param


def main(from_checkpoint: bool = False):
def main(from_checkpoint: bool = False, n: int = None):
annualized_target = 0.10

if not from_checkpoint:
run_all_strategies(annualized_target)
run_all_strategies(annualized_target, n=n)

equal_weights_results = BacktestResult.load("checkpoints/equal_weights.pickle")

Expand All @@ -131,12 +131,15 @@ def main(from_checkpoint: bool = False):
weight_limited_result = BacktestResult.load(
f"checkpoints/weight_limited_{annualized_target}.pickle"
)

leverage_limit_result = BacktestResult.load(
f"checkpoints/leverage_limit_{annualized_target}.pickle"
)

turnover_limit_result = BacktestResult.load(
f"checkpoints/turnover_limit_{annualized_target}.pickle"
)

robust_result = BacktestResult.load(
f"checkpoints/robust_{annualized_target}.pickle"
)
Expand All @@ -151,37 +154,37 @@ def main(from_checkpoint: bool = False):
)


def run_all_strategies(annualized_target: float, n: int | None = None) -> None:
    """Backtest every strategy and save each result as a checkpoint pickle.

    Parameters
    ----------
    annualized_target : float
        Annualized volatility target; converted to a per-period sigma
        target using the equal-weights backtest's ``periods_per_year``.
    n : int | None
        Forwarded to ``run_backtest`` (and from there to the data loader)
        to restrict the backtest to the last ``n`` days; ``None`` runs
        the full history.
    """
    equal_weights_results = run_backtest(equal_weights, 0.0, verbose=True, n=n)
    equal_weights_results.save("checkpoints/equal_weights.pickle")

    # Scale the annual volatility target down to a single-period target.
    adjustment_factor = np.sqrt(equal_weights_results.periods_per_year)
    sigma_target = annualized_target / adjustment_factor

    print("Running basic Markowitz")
    basic_result = run_backtest(basic_markowitz, sigma_target, verbose=True, n=n)
    basic_result.save(f"checkpoints/basic_{annualized_target}.pickle")

    print("Running weight-limited Markowitz")
    weight_limited_result = run_backtest(
        weight_limits_markowitz, sigma_target, verbose=True, n=n
    )
    weight_limited_result.save(f"checkpoints/weight_limited_{annualized_target}.pickle")

    print("Running leverage limit Markowitz")
    leverage_limit_result = run_backtest(
        leverage_limit_markowitz, sigma_target, verbose=True, n=n
    )
    leverage_limit_result.save(f"checkpoints/leverage_limit_{annualized_target}.pickle")

    print("Running turnover limit Markowitz")
    turnover_limit_result = run_backtest(
        turnover_limit_markowitz, sigma_target, verbose=True, n=n
    )
    turnover_limit_result.save(f"checkpoints/turnover_limit_{annualized_target}.pickle")

    print("Running robust Markowitz")
    robust_result = run_backtest(robust_markowitz, sigma_target, verbose=True, n=n)
    robust_result.save(f"checkpoints/robust_{annualized_target}.pickle")


Expand Down
7 changes: 7 additions & 0 deletions experiments_short.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
from pathlib import Path

from experiments.taming import main as taming_main

if __name__ == "__main__":
    # Results are pickled into this directory; create it up front so the
    # first save does not fail on a missing path.
    checkpoint_dir = Path("checkpoints")
    checkpoint_dir.mkdir(exist_ok=True)
    # n=1005 limits the backtest to the last 1005 days — a shortened run,
    # presumably sized for the CI/CD pipeline (see the workflow target).
    taming_main(n=1005)

0 comments on commit d44e999

Please sign in to comment.