diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs
index f526dbc2..bea927b4 100644
--- a/pineappl/src/grid.rs
+++ b/pineappl/src/grid.rs
@@ -4,6 +4,7 @@
 use super::bin::{BinInfo, BinLimits, BinRemapper};
 use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2};
 use super::lumi::LumiEntry;
 use super::ntuple_subgrid::NtupleSubgridV1;
+use super::read_only_sparse_subgrid::ReadOnlySparseSubgridV1;
 use super::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams};
 use either::Either::{Left, Right};
 use float_cmp::approx_eq;
@@ -711,10 +712,17 @@ impl Grid {
                     let mut new_subgrid = LagrangeSparseSubgridV1::from(&*grid).into();
                     mem::swap(subgrid, &mut new_subgrid);
                 }
-                SubgridEnum::LagrangeSparseSubgridV1(_) => {
+                SubgridEnum::LagrangeSubgridV2(grid) => {
+                    let mut new_subgrid = ReadOnlySparseSubgridV1::from(&*grid).into();
+                    mem::swap(subgrid, &mut new_subgrid);
+                }
+                SubgridEnum::LagrangeSparseSubgridV1(_)
+                | SubgridEnum::ReadOnlySparseSubgridV1(_) => {
                     // nothing to optimize here
                 }
-                SubgridEnum::NtupleSubgridV1(_) | SubgridEnum::LagrangeSubgridV2(_) => todo!(),
+                SubgridEnum::NtupleSubgridV1(_) => {
+                    todo!()
+                }
             }
         }
     }
diff --git a/pineappl/src/lagrange_subgrid.rs b/pineappl/src/lagrange_subgrid.rs
index c705d7a9..b2ac386b 100644
--- a/pineappl/src/lagrange_subgrid.rs
+++ b/pineappl/src/lagrange_subgrid.rs
@@ -10,7 +10,7 @@
 use ndarray::Array3;
 use serde::{Deserialize, Serialize};
 use std::mem;
 
-fn weightfun(x: f64) -> f64 {
+pub(crate) fn weightfun(x: f64) -> f64 {
     (x.sqrt() / (1.0 - 0.99 * x)).powi(3)
 }
 
@@ -357,14 +357,14 @@ impl Subgrid for LagrangeSubgridV1 {
 
 /// Subgrid which uses Lagrange-interpolation.
 #[derive(Deserialize, Serialize)]
 pub struct LagrangeSubgridV2 {
-    grid: Option<Array3<f64>>,
-    ntau: usize,
-    ny1: usize,
-    ny2: usize,
+    pub(crate) grid: Option<Array3<f64>>,
+    pub(crate) ntau: usize,
+    pub(crate) ny1: usize,
+    pub(crate) ny2: usize,
     y1order: usize,
     y2order: usize,
     tauorder: usize,
-    itaumin: usize,
+    pub(crate) itaumin: usize,
     itaumax: usize,
     reweight1: bool,
     reweight2: bool,
@@ -374,7 +374,7 @@ pub struct LagrangeSubgridV2 {
     y2max: f64,
     taumin: f64,
     taumax: f64,
-    static_q2: f64,
+    pub(crate) static_q2: f64,
 }
 
 impl LagrangeSubgridV2 {
diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs
index 745f2525..0154da19 100644
--- a/pineappl/src/lib.rs
+++ b/pineappl/src/lib.rs
@@ -11,5 +11,6 @@
 pub mod grid;
 pub mod lagrange_subgrid;
 pub mod lumi;
 pub mod ntuple_subgrid;
+pub mod read_only_sparse_subgrid;
 pub mod sparse_array3;
 pub mod subgrid;
diff --git a/pineappl/src/read_only_sparse_subgrid.rs b/pineappl/src/read_only_sparse_subgrid.rs
new file mode 100644
index 00000000..bef4cece
--- /dev/null
+++ b/pineappl/src/read_only_sparse_subgrid.rs
@@ -0,0 +1,236 @@
+//! Module containing the `ReadOnlySparseSubgridV1` type.
+
+use super::grid::Ntuple;
+use super::lagrange_subgrid::{self, LagrangeSubgridV2};
+use super::sparse_array3::SparseArray3;
+use super::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams};
+use either::Either;
+use ndarray::Axis;
+use serde::{Deserialize, Serialize};
+use std::mem;
+
+/// Sparse, read-only subgrid with possibly different `x1` and `x2` grids; filled via
+/// conversion from `LagrangeSubgridV2`, not via `fill` (which panics).
+#[derive(Deserialize, Serialize)]
+pub struct ReadOnlySparseSubgridV1 {
+    array: SparseArray3<f64>,
+    q2_grid: Vec<f64>,
+    x1_grid: Vec<f64>,
+    x2_grid: Vec<f64>,
+    reweight_x1: Vec<f64>,
+    reweight_x2: Vec<f64>,
+}
+
+impl ReadOnlySparseSubgridV1 {
+    /// Constructor.
+    #[must_use]
+    pub fn new(
+        subgrid_params: &SubgridParams,
+        extra_subgrid_params: &ExtraSubgridParams,
+        q2_grid: Vec<f64>,
+        x1_grid: Vec<f64>,
+        x2_grid: Vec<f64>,
+        reweight_x1: Vec<f64>,
+        reweight_x2: Vec<f64>,
+    ) -> Self {
+        Self {
+            array: SparseArray3::new(
+                subgrid_params.q2_bins(),
+                subgrid_params.x_bins(),
+                extra_subgrid_params.x2_bins(),
+            ),
+            q2_grid,
+            x1_grid,
+            x2_grid,
+            reweight_x1,
+            reweight_x2,
+        }
+    }
+}
+
+impl Subgrid for ReadOnlySparseSubgridV1 {
+    fn convolute(
+        &self,
+        _: &[f64],
+        _: &[f64],
+        _: &[f64],
+        lumi: Either<&dyn Fn(usize, usize, usize) -> f64, &dyn Fn(f64, f64, f64) -> f64>,
+    ) -> f64 {
+        // only the index-based luminosity callback is supported by this subgrid type
+        let lumi = lumi.left().unwrap();
+
+        self.array
+            .indexed_iter()
+            .map(|((iq2, ix1, ix2), sigma)| {
+                let mut value = sigma * lumi(ix1, ix2, iq2);
+                // empty reweight vectors mean "no reweighting"
+                if !self.reweight_x1.is_empty() {
+                    value *= self.reweight_x1[ix1];
+                }
+                if !self.reweight_x2.is_empty() {
+                    value *= self.reweight_x2[ix2];
+                }
+                value
+            })
+            .sum()
+    }
+
+    fn fill(&mut self, _: &Ntuple<f64>) {
+        panic!("this grid doesn't support the fill operation");
+    }
+
+    fn q2_grid(&self) -> Vec<f64> {
+        self.q2_grid.clone()
+    }
+
+    fn x1_grid(&self) -> Vec<f64> {
+        self.x1_grid.clone()
+    }
+
+    fn x2_grid(&self) -> Vec<f64> {
+        self.x2_grid.clone()
+    }
+
+    fn is_empty(&self) -> bool {
+        self.array.is_empty()
+    }
+
+    fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) {
+        if let SubgridEnum::ReadOnlySparseSubgridV1(other_grid) = other {
+            if self.array.is_empty() && !transpose {
+                mem::swap(&mut self.array, &mut other_grid.array);
+            } else {
+                // TODO: we need much more checks here if the subgrids are compatible at all
+
+                if transpose {
+                    for ((i, k, j), value) in other_grid.array.indexed_iter() {
+                        self.array[[i, j, k]] += value;
+                    }
+                } else {
+                    for ((i, j, k), value) in other_grid.array.indexed_iter() {
+                        self.array[[i, j, k]] += value;
+                    }
+                }
+            }
+        } else {
+            todo!();
+        }
+    }
+
+    fn scale(&mut self, factor: f64) {
+        if factor == 0.0 {
+            self.array.clear();
+        } else {
+            self.array.iter_mut().for_each(|x| *x *= factor);
+        }
+    }
+
+    fn q2_slice(&self) -> (usize, usize) {
+        let range = self.array.x_range();
+
+        (range.start, range.end)
+    }
+
+    fn fill_q2_slice(&self, q2_slice: usize, grid: &mut [f64]) {
+        let x1: Vec<_> = self
+            .x1_grid
+            .iter()
+            .enumerate()
+            .map(|(i, &x)| if self.reweight_x1.is_empty() { 1.0 } else { self.reweight_x1[i] } / x)
+            .collect();
+        let x2: Vec<_> = self
+            .x2_grid
+            .iter()
+            .enumerate()
+            .map(|(i, &x)| if self.reweight_x2.is_empty() { 1.0 } else { self.reweight_x2[i] } / x)
+            .collect();
+
+        for value in grid.iter_mut() {
+            *value = 0.0;
+        }
+
+        for ((_, ix1, ix2), value) in self
+            .array
+            .indexed_iter()
+            .filter(|((iq2, _, _), _)| *iq2 == q2_slice)
+        {
+            grid[ix1 * self.x2_grid.len() + ix2] = value * x1[ix1] * x2[ix2];
+        }
+    }
+
+    fn write_q2_slice(&mut self, q2_slice: usize, grid: &[f64]) {
+        self.array.remove_x(q2_slice);
+
+        grid.iter()
+            .enumerate()
+            .filter(|(_, &value)| value != 0.0)
+            .for_each(|(index, &value)| {
+                self.array[[
+                    q2_slice,
+                    index / self.x2_grid.len(),
+                    index % self.x2_grid.len(),
+                ]] = value;
+            });
+    }
+
+    fn symmetrize(&mut self) {
+        let mut new_array =
+            SparseArray3::new(self.q2_grid.len(), self.x1_grid.len(), self.x2_grid.len());
+
+        // copy the upper triangle INCLUDING the diagonal (k == j); `k > j` here
+        // would silently drop every diagonal entry
+        for ((i, j, k), &sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) {
+            new_array[[i, j, k]] = sigma;
+        }
+        // fold the lower triangle onto the upper one; diagonal entries are untouched
+        for ((i, j, k), &sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) {
+            new_array[[i, k, j]] += sigma;
+        }
+
+        mem::swap(&mut self.array, &mut new_array);
+    }
+}
+
+impl From<&LagrangeSubgridV2> for ReadOnlySparseSubgridV1 {
+    fn from(subgrid: &LagrangeSubgridV2) -> Self {
+        let array = subgrid.grid.as_ref().map_or_else(
+            || SparseArray3::new(subgrid.ntau, subgrid.ny1, subgrid.ny2),
+            // in the following case we should optimize when ny2 > ny1
+            |grid| {
+                if subgrid.static_q2 == -1.0 {
+                    SparseArray3::from_ndarray(grid, subgrid.itaumin, subgrid.ntau)
+                } else {
+                    // in this case we've detected a static scale for this bin and we can collapse
+                    // the Q^2 axis into a single bin
+                    SparseArray3::from_ndarray(
+                        &grid
+                            .sum_axis(Axis(0))
+                            .into_shape((1, subgrid.ny1, subgrid.ny2))
+                            .unwrap(),
+                        0,
+                        1,
+                    )
+                }
+            },
+        );
+        let q2_grid = if subgrid.static_q2 == -1.0 {
+            subgrid.q2_grid()
+        } else {
+            vec![subgrid.static_q2]
+        };
+        let x1_grid = subgrid.x1_grid();
+        let x2_grid = subgrid.x2_grid();
+        let reweight_x1 = x1_grid
+            .iter()
+            .map(|x| lagrange_subgrid::weightfun(*x))
+            .collect();
+        let reweight_x2 = x2_grid
+            .iter()
+            .map(|x| lagrange_subgrid::weightfun(*x))
+            .collect();
+
+        Self {
+            array,
+            q2_grid,
+            x1_grid,
+            x2_grid,
+            reweight_x1,
+            reweight_x2,
+        }
+    }
+}
diff --git a/pineappl/src/subgrid.rs b/pineappl/src/subgrid.rs
index e183302f..d443f2e9 100644
--- a/pineappl/src/subgrid.rs
+++ b/pineappl/src/subgrid.rs
@@ -3,6 +3,7 @@
 use super::grid::Ntuple;
 use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2};
 use super::ntuple_subgrid::NtupleSubgridV1;
+use super::read_only_sparse_subgrid::ReadOnlySparseSubgridV1;
 use either::Either;
 use enum_dispatch::enum_dispatch;
 use serde::{Deserialize, Serialize};
@@ -20,6 +21,8 @@ pub enum SubgridEnum {
     LagrangeSparseSubgridV1,
     /// Lagrange-interpolation subgrid with possibly different x1 and x2 bins.
     LagrangeSubgridV2,
+    /// Read-only sparse subgrid with possibly different x1 and x2 bins.
+    ReadOnlySparseSubgridV1,
 }
 
 /// Trait each subgrid must implement.