add benchmark #650

Merged: 8 commits, Apr 8, 2024

Changes from 2 commits
38 changes: 38 additions & 0 deletions .github/workflows/cicd.yaml
@@ -76,3 +76,41 @@ jobs:
- uses: actions/checkout@v4
- name: Test generating docs
run: make docs

benchmark:
needs: [test]
runs-on: ubuntu-20.04
steps:
- name: Check out repository code
uses: actions/checkout@v4

- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.11"

- name: Install types
run: |
python -m pip install ./stac_fastapi/types[dev]

- name: Install core api
run: |
python -m pip install ./stac_fastapi/api[dev,benchmark]

- name: Run Benchmark
run: python -m pytest stac_fastapi/tests/benchmarks.py --benchmark-only --benchmark-columns 'min, max, mean, median' --benchmark-json output.json

# - name: Store and benchmark result
# uses: benchmark-action/github-action-benchmark@v1
# with:
# name: STAC FastAPI Benchmarks
# tool: 'pytest'
# output-file-path: output.json
# alert-threshold: '130%'
# comment-on-alert: true
# fail-on-alert: false
# # GitHub API token to make a commit comment
# github-token: ${{ secrets.GITHUB_TOKEN }}
# gh-pages-branch: 'gh-benchmarks'
# # Make a commit only if main
# auto-push: ${{ github.ref == 'refs/heads/main' }}
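The commented-out step would publish results via github-action-benchmark and flag regressions above the 130% threshold. Even without it, the `--benchmark-json output.json` flag already produces a machine-readable report. A minimal sketch for summarizing that file locally (field names assume pytest-benchmark's standard JSON layout):

```python
# Sketch: summarize pytest-benchmark's output.json.
# Assumes pytest-benchmark's standard report layout
# (top-level "benchmarks" list, each entry with "name" and "stats").
import json

with open("output.json") as f:
    report = json.load(f)

for bench in report["benchmarks"]:
    stats = bench["stats"]
    print(
        f"{bench['name']}: "
        f"min={stats['min']:.6f}s mean={stats['mean']:.6f}s "
        f"median={stats['median']:.6f}s max={stats['max']:.6f}s"
    )
```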
3 changes: 3 additions & 0 deletions stac_fastapi/api/setup.py
@@ -23,6 +23,9 @@
"requests",
"pystac[validation]==1.*",
],
"benchmark": [
"pytest-benchmark",
],
"docs": ["mkdocs", "mkdocs-material", "pdocs"],
}

168 changes: 168 additions & 0 deletions stac_fastapi/api/tests/benchmarks.py
@@ -0,0 +1,168 @@
from datetime import datetime
from typing import List, Optional, Union

import pytest
from stac_pydantic.api.utils import link_factory
from starlette.testclient import TestClient

from stac_fastapi.api.app import StacApi
from stac_fastapi.types import stac as stac_types
from stac_fastapi.types.config import ApiSettings
from stac_fastapi.types.core import BaseCoreClient, BaseSearchPostRequest, NumType

collection_links = link_factory.CollectionLinks("/", "test").create_links()
item_links = link_factory.ItemLinks("/", "test", "test").create_links()


collections = [
stac_types.Collection(
id=f"test_collection_{n}",
title="Test Collection",
description="A test collection",
keywords=["test"],
license="proprietary",
extent={
"spatial": {"bbox": [[-180, -90, 180, 90]]},
"temporal": {"interval": [["2000-01-01T00:00:00Z", None]]},
},
links=collection_links.dict(exclude_none=True),
)
for n in range(0, 10)
]

items = [
stac_types.Item(
id=f"test_item_{n}",
type="Feature",
geometry={"type": "Point", "coordinates": [0, 0]},
bbox=[-180, -90, 180, 90],
properties={"datetime": "2000-01-01T00:00:00Z"},
links=item_links.dict(exclude_none=True),
assets={},
)
for n in range(0, 1000)
]


class CoreClient(BaseCoreClient):
def post_search(
self, search_request: BaseSearchPostRequest, **kwargs
) -> stac_types.ItemCollection:
raise NotImplementedError

def get_search(
self,
collections: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
bbox: Optional[List[NumType]] = None,
intersects: Optional[str] = None,
datetime: Optional[Union[str, datetime]] = None,
limit: Optional[int] = 10,
**kwargs,
) -> stac_types.ItemCollection:
raise NotImplementedError

def get_item(self, item_id: str, collection_id: str, **kwargs) -> stac_types.Item:
raise NotImplementedError

def all_collections(self, **kwargs) -> stac_types.Collections:
return stac_types.Collections(
            collections=collections,
links=[
{"href": "test", "rel": "root"},
{"href": "test", "rel": "self"},
{"href": "test", "rel": "parent"},
],
)

def get_collection(self, collection_id: str, **kwargs) -> stac_types.Collection:
return collections[0]

def item_collection(
self,
collection_id: str,
bbox: Optional[List[Union[float, int]]] = None,
datetime: Optional[Union[str, datetime]] = None,
limit: int = 10,
        token: Optional[str] = None,
**kwargs,
) -> stac_types.ItemCollection:
return stac_types.ItemCollection(
type="FeatureCollection", features=[items[0:limit]]
)


@pytest.fixture(autouse=True)
def client_validation() -> TestClient:
app = StacApi(
settings=ApiSettings(enable_response_models=True), client=CoreClient()
)
with TestClient(app.app) as client:
yield client


@pytest.fixture(autouse=True)
def client_no_validation() -> TestClient:
app = StacApi(
settings=ApiSettings(enable_response_models=False), client=CoreClient()
)
with TestClient(app.app) as client:
yield client
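A note on these fixtures: entering `TestClient` as a context manager runs the app's startup and shutdown events, so each benchmark hits a fully initialized app. The same pattern in isolation (a hypothetical empty app, not part of this PR):

```python
from starlette.applications import Starlette
from starlette.testclient import TestClient

app = Starlette()  # hypothetical empty app, for illustration only

# Entering the context manager fires startup events; exiting fires shutdown.
with TestClient(app) as client:
    response = client.get("/missing")
    assert response.status_code == 404  # no routes registered
```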


@pytest.mark.parametrize("limit", [1, 10, 50, 100, 200, 250])
@pytest.mark.parametrize("validate", [True, False])
def test_benchmark_items(
benchmark, client_validation, client_no_validation, validate, limit
):
"""Benchmark items endpoint."""
params = {"limit": limit}

def f(p):
if validate:
return client_validation.get("/collections/fake_collection/items", params=p)
else:
return client_no_validation.get(
"/collections/fake_collection/items", params=p
)

benchmark.group = "Items With Model validation" if validate else "Items"

response = benchmark(f, params)
assert response.status_code == 200


@pytest.mark.parametrize("validate", [True, False])
def test_benchmark_collection(
benchmark, client_validation, client_no_validation, validate
):
"""Benchmark items endpoint."""

def f():
if validate:
return client_validation.get("/collections/fake_collection")
else:
return client_no_validation.get("/collections/fake_collection")

benchmark.group = "Collection With Model validation" if validate else "Collection"

response = benchmark(f)
assert response.status_code == 200


@pytest.mark.parametrize("validate", [True, False])
def test_benchmark_collections(
benchmark, client_validation, client_no_validation, validate
):
"""Benchmark items endpoint."""

def f():
if validate:
return client_validation.get("/collections")
else:
return client_no_validation.get("/collections")

benchmark.group = "Collections With Model validation" if validate else "Collections"

response = benchmark(f)
assert response.status_code == 200
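For reference, pytest-benchmark's `benchmark(f, *args)` calls `f(*args)` repeatedly, records timing statistics, and returns the result of the final call; `benchmark.group` controls how rows are grouped in the report table. A minimal standalone example of the same fixture usage (hypothetical, not part of this PR):

```python
# Hypothetical standalone test showing the benchmark fixture in isolation.
def test_sum(benchmark):
    # benchmark() times repeated calls of sum(range(1000)) and
    # returns the value from the final call.
    result = benchmark(sum, range(1000))
    assert result == 499500
```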