Skip to content

Commit

Permalink
fix(api): add max page size and tests (#468)
Browse files Browse the repository at this point in the history
* Add max page size and tests. Cleanup some impl details.

* Update src/db/collections/ledger_update.rs

Co-authored-by: Jochen Görtler <grtlr@users.noreply.github.com>
  • Loading branch information
Alexandcoats and grtlr authored Jul 26, 2022
1 parent f093776 commit ed797eb
Show file tree
Hide file tree
Showing 12 changed files with 192 additions and 75 deletions.
8 changes: 4 additions & 4 deletions bin/inx-chronicle/src/api/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use axum::{
extract::rejection::{ExtensionRejection, QueryRejection, TypedHeaderRejection},
response::IntoResponse,
};
use chronicle::runtime::ErrorLevel;
use chronicle::{db::collections::ParseSortError, runtime::ErrorLevel};
use hyper::{header::InvalidHeaderValue, StatusCode};
use mongodb::bson::document::ValueAccessError;
use serde::Serialize;
Expand Down Expand Up @@ -109,17 +109,17 @@ pub enum ParseError {
#[allow(dead_code)]
#[error("Invalid cursor")]
BadPagingState,
#[error("Invalid sort order descriptor")]
BadSortDescriptor,
#[cfg(feature = "stardust")]
#[error(transparent)]
BeeBlockStardust(#[from] bee_block_stardust::Error),
#[error(transparent)]
Bool(#[from] ParseBoolError),
#[error(transparent)]
DecimalU256(#[from] uint::FromDecStrErr),
#[error(transparent)]
Int(#[from] ParseIntError),
#[error(transparent)]
DecimalU256(#[from] uint::FromDecStrErr),
SortOrder(#[from] ParseSortError),
#[error(transparent)]
TimeRange(#[from] time::error::ComponentRange),
}
Expand Down
41 changes: 35 additions & 6 deletions bin/inx-chronicle/src/api/extractors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@ use axum::extract::{FromRequest, Query};
use serde::Deserialize;
use time::{Duration, OffsetDateTime};

use super::error::ApiError;
use super::{error::ApiError, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE};

#[derive(Copy, Clone, Deserialize)]
#[serde(default)]
#[derive(Debug, Copy, Clone, Deserialize, PartialEq, Eq)]
#[serde(default, rename_all = "camelCase")]
pub struct Pagination {
pub page_size: usize,
pub page: usize,
Expand All @@ -18,7 +18,7 @@ pub struct Pagination {
impl Default for Pagination {
fn default() -> Self {
Self {
page_size: 100,
page_size: DEFAULT_PAGE_SIZE,
page: 0,
}
}
Expand All @@ -29,15 +29,16 @@ impl<B: Send> FromRequest<B> for Pagination {
type Rejection = ApiError;

async fn from_request(req: &mut axum::extract::RequestParts<B>) -> Result<Self, Self::Rejection> {
let Query(pagination) = Query::<Pagination>::from_request(req)
let Query(mut pagination) = Query::<Pagination>::from_request(req)
.await
.map_err(ApiError::QueryError)?;
pagination.page_size = pagination.page_size.min(MAX_PAGE_SIZE);
Ok(pagination)
}
}

#[derive(Copy, Clone, Default, Deserialize)]
#[serde(default)]
#[serde(default, rename_all = "camelCase")]
pub struct TimeRangeQuery {
start_timestamp: Option<u32>,
end_timestamp: Option<u32>,
Expand Down Expand Up @@ -131,3 +132,31 @@ impl<B: Send> FromRequest<B> for Expanded {
Ok(expanded)
}
}

#[cfg(test)]
mod test {
    use axum::{
        extract::{FromRequest, RequestParts},
        http::Request,
    };

    use super::*;

    /// An oversized `pageSize` query parameter must be clamped down to `MAX_PAGE_SIZE`
    /// by the `Pagination` extractor rather than passed through unchecked.
    #[tokio::test]
    async fn page_size_clamped() {
        // Build a GET request whose requested page size far exceeds the maximum.
        let request = Request::builder()
            .method("GET")
            .uri("/?pageSize=9999999")
            .body(())
            .unwrap();
        let mut parts = RequestParts::new(request);

        let pagination = Pagination::from_request(&mut parts).await.unwrap();

        // Everything except the clamped page size should match the defaults.
        let expected = Pagination {
            page_size: MAX_PAGE_SIZE,
            ..Default::default()
        };
        assert_eq!(pagination, expected);
    }
}
3 changes: 3 additions & 0 deletions bin/inx-chronicle/src/api/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,9 @@ pub use self::{
};
use self::{config::ApiData, routes::routes};

/// The page size used when a request does not specify one.
pub const DEFAULT_PAGE_SIZE: usize = 100;
/// The hard upper bound on a client-requested page size; larger values are clamped to this.
pub const MAX_PAGE_SIZE: usize = 1000;

/// The result of a request to the api
pub type ApiResult<T> = Result<T, ApiError>;

Expand Down
97 changes: 67 additions & 30 deletions bin/inx-chronicle/src/api/stardust/history/extractors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,9 @@ use chronicle::{
};
use serde::Deserialize;

use crate::api::{
error::ParseError,
stardust::{sort_order_from_str, DEFAULT_PAGE_SIZE, DEFAULT_SORT_ORDER},
ApiError,
};
use crate::api::{error::ParseError, ApiError, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE};

#[derive(Clone)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LedgerUpdatesByAddressPagination {
pub page_size: usize,
pub sort: SortOrder,
Expand Down Expand Up @@ -80,28 +76,34 @@ impl<B: Send> FromRequest<B> for LedgerUpdatesByAddressPagination {
.await
.map_err(ApiError::QueryError)?;

let sort = query.sort.map_or(Ok(DEFAULT_SORT_ORDER), sort_order_from_str)?;
let sort = query
.sort
.as_deref()
.map_or(Ok(Default::default()), str::parse)
.map_err(ParseError::SortOrder)?;

let pagination = if let Some(cursor) = query.cursor {
let (page_size, cursor) = if let Some(cursor) = query.cursor {
let cursor: LedgerUpdatesByAddressCursor = cursor.parse()?;
LedgerUpdatesByAddressPagination {
page_size: cursor.page_size,
cursor: Some((cursor.milestone_index, Some((cursor.output_id, cursor.is_spent)))),
sort,
}
(
cursor.page_size,
Some((cursor.milestone_index, Some((cursor.output_id, cursor.is_spent)))),
)
} else {
LedgerUpdatesByAddressPagination {
page_size: query.page_size.unwrap_or(DEFAULT_PAGE_SIZE),
cursor: query.start_milestone_index.map(|i| (i, None)),
sort,
}
(
query.page_size.unwrap_or(DEFAULT_PAGE_SIZE),
query.start_milestone_index.map(|i| (i, None)),
)
};

Ok(pagination)
Ok(LedgerUpdatesByAddressPagination {
page_size: page_size.min(MAX_PAGE_SIZE),
cursor,
sort,
})
}
}

#[derive(Clone)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LedgerUpdatesByMilestonePagination {
pub page_size: usize,
pub cursor: Option<(OutputId, bool)>,
Expand Down Expand Up @@ -152,25 +154,24 @@ impl<B: Send> FromRequest<B> for LedgerUpdatesByMilestonePagination {
.await
.map_err(ApiError::QueryError)?;

let pagination = if let Some(cursor) = query.cursor {
let (page_size, cursor) = if let Some(cursor) = query.cursor {
let cursor: LedgerUpdatesByMilestoneCursor = cursor.parse()?;
LedgerUpdatesByMilestonePagination {
page_size: cursor.page_size,
cursor: Some((cursor.output_id, cursor.is_spent)),
}
(cursor.page_size, Some((cursor.output_id, cursor.is_spent)))
} else {
LedgerUpdatesByMilestonePagination {
page_size: query.page_size.unwrap_or(DEFAULT_PAGE_SIZE),
cursor: None,
}
(query.page_size.unwrap_or(DEFAULT_PAGE_SIZE), None)
};

Ok(pagination)
Ok(LedgerUpdatesByMilestonePagination {
page_size: page_size.min(MAX_PAGE_SIZE),
cursor,
})
}
}

#[cfg(test)]
mod test {
use axum::{extract::RequestParts, http::Request};

use super::*;

#[test]
Expand All @@ -195,4 +196,40 @@ mod test {
let parsed: LedgerUpdatesByMilestoneCursor = cursor.parse().unwrap();
assert_eq!(parsed.to_string(), cursor);
}

#[tokio::test]
async fn page_size_clamped() {
let mut req = RequestParts::new(
Request::builder()
.method("GET")
.uri("/ledger/updates/by-address/0x00?pageSize=9999999")
.body(())
.unwrap(),
);
assert_eq!(
LedgerUpdatesByAddressPagination::from_request(&mut req).await.unwrap(),
LedgerUpdatesByAddressPagination {
page_size: MAX_PAGE_SIZE,
sort: Default::default(),
cursor: Default::default()
}
);

let mut req = RequestParts::new(
Request::builder()
.method("GET")
.uri("/ledger/updates/by-milestone/0?pageSize=9999999")
.body(())
.unwrap(),
);
assert_eq!(
LedgerUpdatesByMilestonePagination::from_request(&mut req)
.await
.unwrap(),
LedgerUpdatesByMilestonePagination {
page_size: MAX_PAGE_SIZE,
cursor: Default::default()
}
);
}
}
65 changes: 51 additions & 14 deletions bin/inx-chronicle/src/api/stardust/indexer/extractors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,9 @@ use mongodb::bson;
use primitive_types::U256;
use serde::Deserialize;

use crate::api::{
error::ParseError,
stardust::{sort_order_from_str, DEFAULT_PAGE_SIZE, DEFAULT_SORT_ORDER},
ApiError,
};
use crate::api::{error::ParseError, ApiError, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE};

#[derive(Clone)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct IndexedOutputsPagination<Q>
where
bson::Document: From<Q>,
Expand Down Expand Up @@ -111,7 +107,11 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<BasicOutputsQuery> {
(None, query.page_size.unwrap_or(DEFAULT_PAGE_SIZE))
};

let sort = query.sort.map_or(Ok(DEFAULT_SORT_ORDER), sort_order_from_str)?;
let sort = query
.sort
.as_deref()
.map_or(Ok(Default::default()), str::parse)
.map_err(ParseError::SortOrder)?;

Ok(IndexedOutputsPagination {
query: BasicOutputsQuery {
Expand Down Expand Up @@ -157,7 +157,7 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<BasicOutputsQuery> {
created_before: query.created_before.map(Into::into),
created_after: query.created_after.map(Into::into),
},
page_size,
page_size: page_size.min(MAX_PAGE_SIZE),
cursor,
sort,
include_spent: query.include_spent.unwrap_or_default(),
Expand Down Expand Up @@ -199,7 +199,11 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<AliasOutputsQuery> {
(None, query.page_size.unwrap_or(DEFAULT_PAGE_SIZE))
};

let sort = query.sort.map_or(Ok(DEFAULT_SORT_ORDER), sort_order_from_str)?;
let sort = query
.sort
.as_deref()
.map_or(Ok(Default::default()), str::parse)
.map_err(ParseError::SortOrder)?;

Ok(IndexedOutputsPagination {
query: AliasOutputsQuery {
Expand Down Expand Up @@ -237,7 +241,7 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<AliasOutputsQuery> {
created_before: query.created_before.map(Into::into),
created_after: query.created_after.map(Into::into),
},
page_size,
page_size: page_size.min(MAX_PAGE_SIZE),
cursor,
sort,
include_spent: query.include_spent.unwrap_or_default(),
Expand Down Expand Up @@ -276,7 +280,11 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<FoundryOutputsQuery> {
(None, query.page_size.unwrap_or(DEFAULT_PAGE_SIZE))
};

let sort = query.sort.map_or(Ok(DEFAULT_SORT_ORDER), sort_order_from_str)?;
let sort = query
.sort
.as_deref()
.map_or(Ok(Default::default()), str::parse)
.map_err(ParseError::SortOrder)?;

Ok(IndexedOutputsPagination {
query: FoundryOutputsQuery {
Expand All @@ -299,7 +307,7 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<FoundryOutputsQuery> {
created_before: query.created_before.map(Into::into),
created_after: query.created_after.map(Into::into),
},
page_size,
page_size: page_size.min(MAX_PAGE_SIZE),
cursor,
sort,
include_spent: query.include_spent.unwrap_or_default(),
Expand Down Expand Up @@ -350,7 +358,11 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<NftOutputsQuery> {
(None, query.page_size.unwrap_or(DEFAULT_PAGE_SIZE))
};

let sort = query.sort.map_or(Ok(DEFAULT_SORT_ORDER), sort_order_from_str)?;
let sort = query
.sort
.as_deref()
.map_or(Ok(Default::default()), str::parse)
.map_err(ParseError::SortOrder)?;

Ok(IndexedOutputsPagination {
query: NftOutputsQuery {
Expand Down Expand Up @@ -401,7 +413,7 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<NftOutputsQuery> {
created_before: query.created_before.map(Into::into),
created_after: query.created_after.map(Into::into),
},
page_size,
page_size: page_size.min(MAX_PAGE_SIZE),
cursor,
sort,
include_spent: query.include_spent.unwrap_or_default(),
Expand All @@ -411,6 +423,8 @@ impl<B: Send> FromRequest<B> for IndexedOutputsPagination<NftOutputsQuery> {

#[cfg(test)]
mod test {
use axum::{extract::RequestParts, http::Request};

use super::*;

#[test]
Expand All @@ -423,4 +437,27 @@ mod test {
let parsed: IndexedOutputsCursor = cursor.parse().unwrap();
assert_eq!(parsed.to_string(), cursor);
}

#[tokio::test]
async fn page_size_clamped() {
let mut req = RequestParts::new(
Request::builder()
.method("GET")
.uri("/outputs/basic?pageSize=9999999")
.body(())
.unwrap(),
);
assert_eq!(
IndexedOutputsPagination::<BasicOutputsQuery>::from_request(&mut req)
.await
.unwrap(),
IndexedOutputsPagination {
page_size: MAX_PAGE_SIZE,
query: Default::default(),
cursor: Default::default(),
sort: Default::default(),
include_spent: Default::default()
}
);
}
}
Loading

0 comments on commit ed797eb

Please sign in to comment.