diff --git a/Cargo.lock b/Cargo.lock
index f16c4d980fe..bd3d8f9175b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -843,6 +843,7 @@ dependencies = [
  "cc",
  "cfg-if",
  "constant_time_eq",
+ "digest 0.10.6",
 ]
 
 [[package]]
@@ -900,6 +901,15 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae"
 
+[[package]]
+name = "block-padding"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93"
+dependencies = [
+ "generic-array 0.14.7",
+]
+
 [[package]]
 name = "blst"
 version = "0.3.10"
@@ -1129,6 +1139,17 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "chacha20"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818"
+dependencies = [
+ "cfg-if",
+ "cipher 0.4.4",
+ "cpufeatures",
+]
+
 [[package]]
 name = "chacha20poly1305"
 version = "0.9.1"
@@ -1136,7 +1157,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5"
 dependencies = [
  "aead 0.4.3",
- "chacha20",
+ "chacha20 0.8.2",
  "cipher 0.3.0",
  "poly1305",
  "zeroize",
@@ -4482,6 +4503,7 @@ version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
 dependencies = [
+ "block-padding 0.3.3",
  "generic-array 0.14.7",
 ]
 
@@ -9687,6 +9709,16 @@ dependencies = [
  "cfg-if",
  "cpufeatures",
  "digest 0.10.6",
+ "sha2-asm",
+]
+
+[[package]]
+name = "sha2-asm"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf27176fb5d15398e3a479c652c20459d9dac830dedd1fa55b42a77dbcdbfcea"
+dependencies = [
+ "cc",
 ]
 
 [[package]]
@@ -11485,7 +11517,12 @@ dependencies = [
 name = "subspace-proof-of-space"
 version = "0.1.0"
 dependencies = [
+ "bitvec",
+ "blake3",
+ "chacha20 0.9.1",
  "criterion",
+ "rand 0.8.5",
+ "sha2 0.10.6",
  "subspace-chiapos",
  "subspace-core-primitives",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index bb68ff86226..086aa99571e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -23,10 +23,13 @@ members = [
 #
 # This list is ordered alphabetically.
 [profile.dev.package]
+bitvec = { opt-level = 3 }
 blake2 = { opt-level = 3 }
 blake2b_simd = { opt-level = 3 }
+blake3 = { opt-level = 3 }
 blst = { opt-level = 3 }
 blst_from_scratch = { opt-level = 3 }
+chacha20 = { opt-level = 3 }
 chacha20poly1305 = { opt-level = 3 }
 cranelift-codegen = { opt-level = 3 }
 cranelift-wasm = { opt-level = 3 }
diff --git a/crates/subspace-proof-of-space/Cargo.toml b/crates/subspace-proof-of-space/Cargo.toml
index 16068d6e6c8..2e59e4131cd 100644
--- a/crates/subspace-proof-of-space/Cargo.toml
+++ b/crates/subspace-proof-of-space/Cargo.toml
@@ -16,11 +16,29 @@ include = [
 bench = false
 
 [dependencies]
+bitvec = { version = "1.0.1", default-features = false, features = ["alloc", "atomic"], optional = true }
+blake3 = { version = "1.3.3", default-features = false, optional = true }
+chacha20 = { version = "0.9.1", default-features = false, optional = true }
+subspace-chiapos = { git = "https://github.com/subspace/chiapos", rev = "3b1ab3ca24764d25da30e0c8243e0bf304b776a5", optional = true }
 subspace-core-primitives = { version = "0.1.0", path = "../subspace-core-primitives", default-features = false }
 
+# Ugly workaround for https://github.com/rust-lang/cargo/issues/1197
+[target.'cfg(any(target_os = "linux", target_os = "macos", all(target_os = "windows", target_env = "gnu")))'.dependencies.sha2]
+features = ["asm"]
+version = "0.10.6"
+optional = true
+
+# Ugly workaround for https://github.com/rust-lang/cargo/issues/1197
+# `asm` feature is not supported on Windows except with GNU toolchain
+[target.'cfg(not(any(target_os = "linux", target_os = "macos", all(target_os = "windows", target_env = "gnu"))))'.dependencies.sha2]
+default-features = false
+version = "0.10.6"
+optional = true
+
 [dev-dependencies]
 criterion = "0.4.0"
+rand = "0.8.5"
+subspace-chiapos = { git = "https://github.com/subspace/chiapos", rev = "3b1ab3ca24764d25da30e0c8243e0bf304b776a5" }
 
 [[bench]]
 name = "pos"
@@ -29,6 +47,9 @@ harness = false
 [features]
 default = ["std"]
 std = [
+    "bitvec?/std",
+    "blake3?/std",
+    "chacha20?/std",
     "subspace-core-primitives/std",
 ]
 # Enable Chia proof of space support (legacy implementation uses C++ chiapos), only works in `std` environment for now
@@ -36,6 +57,15 @@ chia-legacy = [
     "std",
     "subspace-chiapos",
 ]
+# Enable Chia proof of space support using an alternative Rust implementation that also works in no-std environments
+chia = [
+    "bitvec",
+    "blake3",
+    "chacha20",
+    "sha2",
+]
+# Enable support for all possible K values for chia: from the smallest to the insanely large, including values that are not popular in general
+all-chia-k = []
 # Enables shim proof of space that works much faster than original and can be used for testing purposes to reduce memory
 # and CPU usage
 shim = []
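For orientation, a crate elsewhere in the workspace would opt into the new pure-Rust implementation roughly like this (an illustrative sketch, not part of this diff; only the crate name, version and feature name are taken from the manifest above):

[dependencies]
subspace-proof-of-space = { version = "0.1.0", default-features = false, features = ["chia"] }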
diff --git a/crates/subspace-proof-of-space/benches/pos.rs b/crates/subspace-proof-of-space/benches/pos.rs
index 622526b3aaa..40ca9f8be3b 100644
--- a/crates/subspace-proof-of-space/benches/pos.rs
+++ b/crates/subspace-proof-of-space/benches/pos.rs
@@ -1,20 +1,20 @@
 #![feature(const_trait_impl)]
 
-#[cfg(any(feature = "chia-legacy", feature = "shim"))]
+#[cfg(any(feature = "chia-legacy", feature = "chia", feature = "shim"))]
 use criterion::black_box;
 use criterion::{criterion_group, criterion_main, Criterion};
-#[cfg(any(feature = "chia-legacy", feature = "shim"))]
+#[cfg(any(feature = "chia-legacy", feature = "chia", feature = "shim"))]
 use subspace_core_primitives::PosSeed;
-#[cfg(any(feature = "chia-legacy", feature = "shim"))]
+#[cfg(any(feature = "chia-legacy", feature = "chia", feature = "shim"))]
 use subspace_proof_of_space::{Quality, Table};
 
-#[cfg(any(feature = "chia-legacy", feature = "shim"))]
+#[cfg(any(feature = "chia-legacy", feature = "chia", feature = "shim"))]
 const SEED: PosSeed = PosSeed::from([
     35, 2, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13, 222, 151, 16, 228, 211, 254, 45, 92, 198,
     204, 10, 9, 10, 11, 129, 139, 171, 15, 23,
 ]);
 
-#[cfg(any(feature = "chia-legacy", feature = "shim"))]
+#[cfg(any(feature = "chia-legacy", feature = "chia", feature = "shim"))]
 fn pos_bench<PosTable>(
     c: &mut Criterion,
     name: &'static str,
@@ -70,7 +70,7 @@ fn pos_bench<PosTable>(
 }
 
 pub fn criterion_benchmark(c: &mut Criterion) {
-    #[cfg(not(any(feature = "chia-legacy", feature = "shim")))]
+    #[cfg(not(any(feature = "chia-legacy", feature = "chia", feature = "shim")))]
     {
         let _ = c;
         panic!(r#"Enable "chia" and/or "shim" feature to run benches"#);
@@ -82,6 +82,20 @@ pub fn criterion_benchmark(c: &mut Criterion) {
         // This challenge index with above seed is known to have a solution
         let challenge_index_with_solution = 1;
 
+        pos_bench::<subspace_proof_of_space::chia_legacy::ChiaTable>(
+            c,
+            "chia-legacy",
+            challenge_index_without_solution,
+            challenge_index_with_solution,
+        )
+    }
+    #[cfg(feature = "chia")]
+    {
+        // This challenge index with above seed is known to not have a solution
+        let challenge_index_without_solution = 1232460437;
+        // This challenge index with above seed is known to have a solution
+        let challenge_index_with_solution = 124537303;
+
         pos_bench::<subspace_proof_of_space::chia::ChiaTable>(
             c,
             "chia",
diff --git a/crates/subspace-proof-of-space/src/chia.rs b/crates/subspace-proof-of-space/src/chia.rs
new file mode 100644
index 00000000000..607aea6e2cd
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chia.rs
@@ -0,0 +1,98 @@
+//! Chia proof of space implementation
+use crate::chiapos::Tables;
+use crate::{PosTableType, Quality, Table};
+use core::mem;
+use subspace_core_primitives::{PosProof, PosQualityBytes, PosSeed};
+
+const K: u8 = 17;
+
+/// Abstraction that represents quality of the solution in the table.
+///
+/// Chia implementation.
+#[derive(Debug)]
+#[must_use]
+pub struct ChiaQuality<'a> {
+    bytes: PosQualityBytes,
+    challenge: [u8; 32],
+    tables: &'a Tables<K>,
+}
+
+impl<'a> Quality for ChiaQuality<'a> {
+    fn to_bytes(&self) -> PosQualityBytes {
+        self.bytes
+    }
+
+    fn create_proof(&self) -> PosProof {
+        self.tables
+            .find_proof(&self.challenge)
+            .next()
+            .map(PosProof::from)
+            .expect("Proof always exists if quality exists; qed")
+    }
+}
+
+/// Subspace proof of space table
+///
+/// Chia implementation.
+#[derive(Debug)]
+pub struct ChiaTable {
+    tables: Tables<K>,
+}
+
+impl Table for ChiaTable {
+    const TABLE_TYPE: PosTableType = PosTableType::Chia;
+
+    type Quality<'a> = ChiaQuality<'a>;
+
+    fn generate(seed: &PosSeed) -> ChiaTable {
+        Self {
+            tables: Tables::<K>::create_simple((*seed).into()),
+        }
+    }
+
+    fn find_quality(&self, challenge_index: u32) -> Option<Self::Quality<'_>> {
+        let mut challenge = [0; 32];
+        challenge[..mem::size_of::<u32>()].copy_from_slice(&challenge_index.to_le_bytes());
+        let maybe_quality = self.tables.find_quality(&challenge).next();
+        maybe_quality.map(|quality| ChiaQuality {
+            bytes: PosQualityBytes::from(quality),
+            challenge,
+            tables: &self.tables,
+        })
+    }
+
+    fn is_proof_valid(
+        seed: &PosSeed,
+        challenge_index: u32,
+        proof: &PosProof,
+    ) -> Option<PosQualityBytes> {
+        let mut challenge = [0; 32];
+        challenge[..mem::size_of::<u32>()].copy_from_slice(&challenge_index.to_le_bytes());
+        Tables::<K>::verify(**seed, &challenge, proof).map(PosQualityBytes::from)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    const SEED: PosSeed = PosSeed::from([
+        35, 2, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13, 222, 151, 16, 228, 211, 254, 45, 92,
+        198, 204, 10, 9, 10, 11, 129, 139, 171, 15, 23,
+    ]);
+
+    #[test]
+    fn basic() {
+        let table = ChiaTable::generate(&SEED);
+
+        assert!(table.find_quality(1232460437).is_none());
+
+        {
+            let challenge_index = 124537303;
+            let quality = table.find_quality(challenge_index).unwrap();
+            let proof = quality.create_proof();
+            let maybe_quality = ChiaTable::is_proof_valid(&SEED, challenge_index, &proof);
+            assert_eq!(maybe_quality, Some(quality.to_bytes()));
+        }
+    }
+}
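A minimal usage sketch of the API above (illustrative, not part of this diff; `prove_and_verify` is a hypothetical helper), mirroring what the `basic` test exercises:

use subspace_core_primitives::PosSeed;
use subspace_proof_of_space::chia::ChiaTable;
use subspace_proof_of_space::{Quality, Table};

fn prove_and_verify(seed: &PosSeed, challenge_index: u32) -> bool {
    // Generating the table is the expensive step (all chiapos tables for K = 17 in memory)
    let table = ChiaTable::generate(seed);
    match table.find_quality(challenge_index) {
        Some(quality) => {
            let proof = quality.create_proof();
            // Verification is stateless: it only needs the seed, challenge index and proof
            ChiaTable::is_proof_valid(seed, challenge_index, &proof) == Some(quality.to_bytes())
        }
        None => false,
    }
}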
diff --git a/crates/subspace-proof-of-space/src/chiapos.rs b/crates/subspace-proof-of-space/src/chiapos.rs
new file mode 100644
index 00000000000..038aef62d3d
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos.rs
@@ -0,0 +1,94 @@
+//! Chia proof of space reimplementation in Rust
+
+mod constants;
+mod table;
+mod tables;
+#[cfg(test)]
+mod tests;
+mod utils;
+
+pub use crate::chiapos::table::TablesCache;
+use crate::chiapos::table::{
+    fn_hashing_input_bytes, metadata_size_bytes, x_size_bytes, y_size_bytes,
+};
+use crate::chiapos::tables::TablesGeneric;
+use crate::chiapos::utils::EvaluatableUsize;
+
+type Seed = [u8; 32];
+type Challenge = [u8; 32];
+type Quality = [u8; 32];
+
+/// Collection of Chia tables
+#[derive(Debug)]
+pub struct Tables<const K: u8>(TablesGeneric<K>)
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 1) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 2) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 3) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 4) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 5) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 6) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 7) }>: Sized,
+    EvaluatableUsize<{ fn_hashing_input_bytes(K) }>: Sized;
+
+macro_rules! impl_any {
+    ($($k: expr$(,)? )*) => {
+        $(
+impl Tables<$k> {
+    /// Create Chia proof of space tables.
+    ///
+    /// Advanced version of [`Self::create_simple`] that allows reusing the cache.
+    pub fn create(seed: Seed, cache: &mut TablesCache<$k>) -> Self {
+        Self(TablesGeneric::<$k>::create(
+            seed, cache,
+        ))
+    }
+
+    /// Create Chia proof of space tables.
+    ///
+    /// Simpler version of [`Self::create`].
+    pub fn create_simple(seed: Seed) -> Self {
+        Self::create(seed, &mut TablesCache::default())
+    }
+
+    /// Find proof of space quality for given challenge.
+    pub fn find_quality<'a>(
+        &'a self,
+        challenge: &'a Challenge,
+    ) -> impl Iterator<Item = Quality> + 'a {
+        self.0.find_quality(challenge)
+    }
+
+    /// Find proof of space for given challenge.
+    pub fn find_proof<'a>(
+        &'a self,
+        challenge: &'a Challenge,
+    ) -> impl Iterator<Item = [u8; 64 * $k / 8]> + 'a {
+        self.0.find_proof(challenge)
+    }
+
+    /// Verify proof of space for given seed and challenge.
+    pub fn verify(
+        seed: Seed,
+        challenge: &Challenge,
+        proof_of_space: &[u8; 64 * $k as usize / 8],
+    ) -> Option<Quality> {
+        TablesGeneric::<$k>::verify(seed, challenge, proof_of_space)
+    }
+}
+        )*
+    }
+}
+
+// These are all `K` that can be safely used on 32-bit platforms
+#[cfg(feature = "all-chia-k")]
+impl_any!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 19, 20);
+impl_any!(16, 17, 18);
+
+// These are all `K` that require a 64-bit platform
+#[cfg(target_pointer_width = "64")]
+impl_any!(32, 33, 34, 35);
+#[cfg(all(target_pointer_width = "64", feature = "all-chia-k"))]
+impl_any!(21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 36, 37, 38, 39, 40);
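A hedged sketch of the lower-level API above, reusing a single `TablesCache` across several seeds as the `create`/`create_simple` split suggests (illustrative, not part of this diff; `count_qualities` is a hypothetical helper with K fixed to 17 as in `chia.rs`):

use crate::chiapos::{Tables, TablesCache};

fn count_qualities(seeds: &[[u8; 32]], challenge: &[u8; 32]) -> usize {
    // One cache shared across all table builds to avoid repeated allocations
    let mut cache = TablesCache::<17>::default();
    seeds
        .iter()
        .map(|&seed| {
            let tables = Tables::<17>::create(seed, &mut cache);
            tables.find_quality(challenge).count()
        })
        .sum()
}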
diff --git a/crates/subspace-proof-of-space/src/chiapos/constants.rs b/crates/subspace-proof-of-space/src/chiapos/constants.rs
new file mode 100644
index 00000000000..8329c388717
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos/constants.rs
@@ -0,0 +1,6 @@
+/// PRNG extension parameter to avoid collisions
+pub(super) const PARAM_EXT: u8 = 6;
+pub(super) const PARAM_M: u16 = 1 << PARAM_EXT;
+pub(super) const PARAM_B: u16 = 119;
+pub(super) const PARAM_C: u16 = 127;
+pub(super) const PARAM_BC: u16 = PARAM_B * PARAM_C;
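A small sanity-check sketch of how these constants relate (illustrative, not part of this diff; the ~236 average bucket occupancy matches the pre-allocation comment on `Bucket` in `table.rs`):

#[test]
fn param_relationships() {
    // PARAM_M candidate left targets are tried per left entry during matching
    assert_eq!(PARAM_M, 1 << PARAM_EXT);
    // 119 * 127 = 15113 is the bucket width in `y` space
    assert_eq!(PARAM_BC, 15113);
    // `y` values are K + PARAM_EXT bits wide for 2^K entries, so on average roughly
    // PARAM_BC / 2^PARAM_EXT = 236 entries land in each bucket
    assert_eq!(u32::from(PARAM_BC) / (1u32 << PARAM_EXT), 236);
}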
diff --git a/crates/subspace-proof-of-space/src/chiapos/table.rs b/crates/subspace-proof-of-space/src/chiapos/table.rs
new file mode 100644
index 00000000000..b4d8daa67c0
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos/table.rs
@@ -0,0 +1,694 @@
+#[cfg(test)]
+mod tests;
+pub(super) mod types;
+
+extern crate alloc;
+
+use crate::chiapos::constants::{PARAM_B, PARAM_BC, PARAM_C, PARAM_EXT, PARAM_M};
+use crate::chiapos::table::types::{CopyBitsDestination, Metadata, Position, X, Y};
+use crate::chiapos::utils::EvaluatableUsize;
+use crate::chiapos::Seed;
+use alloc::vec::Vec;
+use chacha20::cipher::{KeyIvInit, StreamCipher, StreamCipherSeek};
+use chacha20::{ChaCha8, Key, Nonce};
+use core::mem;
+
+/// Compute the size of `x` in bytes
+pub const fn x_size_bytes(k: u8) -> usize {
+    usize::from(k).div_ceil(u8::BITS as usize)
+}
+
+/// Compute the size of `y` in bits
+pub(super) const fn y_size_bits(k: u8) -> usize {
+    usize::from(k) + usize::from(PARAM_EXT)
+}
+
+/// Compute the size of `y` in bytes
+pub const fn y_size_bytes(k: u8) -> usize {
+    y_size_bits(k).div_ceil(u8::BITS as usize)
+}
+
+/// Metadata size in bits
+pub const fn metadata_size_bits(k: u8, table_number: u8) -> usize {
+    usize::from(k)
+        * match table_number {
+            1 => 1,
+            2 => 2,
+            3 | 4 => 4,
+            5 => 3,
+            6 => 2,
+            7 => 0,
+            _ => unreachable!(),
+        }
+}
+
+/// Max metadata size in bits across all tables
+pub(crate) const fn max_metadata_size_bits(k: u8) -> usize {
+    metadata_size_bits(k, 1)
+        .max(metadata_size_bits(k, 2))
+        .max(metadata_size_bits(k, 3))
+        .max(metadata_size_bits(k, 4))
+        .max(metadata_size_bits(k, 5))
+        .max(metadata_size_bits(k, 6))
+        .max(metadata_size_bits(k, 7))
+}
+
+/// Metadata size in bytes rounded up
+pub const fn metadata_size_bytes(k: u8, table_number: u8) -> usize {
+    metadata_size_bits(k, table_number).div_ceil(u8::BITS as usize)
+}
+
+pub const fn fn_hashing_input_bytes(k: u8) -> usize {
+    (y_size_bits(k) + max_metadata_size_bits(k) * 2).div_ceil(u8::BITS as usize)
+}
+
+/// ChaCha8 keystream output as a [`Vec`] sufficient for the whole first table for [`K`].
+/// Prefer [`partial_y`] if you only need the partial `y` for a single `x`.
+fn partial_ys<const K: u8>(seed: Seed) -> Vec<u8> {
+    let output_len_bits = usize::from(K) * (1 << K);
+    let mut output = vec![0; output_len_bits.div_ceil(u8::BITS as usize)];
+
+    let key = Key::from(seed);
+    let nonce = Nonce::default();
+
+    let mut cipher = ChaCha8::new(&key, &nonce);
+
+    cipher.apply_keystream(&mut output);
+
+    output
+}
+
+/// ChaCha8 keystream bytes for a single `y` at `x` in the first table for [`K`]; returns the bytes
+/// and the offset (in bits) within those bytes at which the data starts.
+/// Prefer [`partial_ys`] if you process the whole first table.
+pub(super) fn partial_y<const K: u8>(
+    seed: Seed,
+    x: usize,
+) -> ([u8; (K as usize * 2).div_ceil(u8::BITS as usize)], usize) {
+    let skip_bits = usize::from(K) * x;
+    let skip_bytes = skip_bits / u8::BITS as usize;
+    let skip_bits = skip_bits % u8::BITS as usize;
+
+    let mut output = [0; (K as usize * 2).div_ceil(u8::BITS as usize)];
+
+    let key = Key::from(seed);
+    let nonce = Nonce::default();
+
+    let mut cipher = ChaCha8::new(&key, &nonce);
+
+    cipher.seek(skip_bytes);
+    cipher.apply_keystream(&mut output);
+
+    (output, skip_bits)
+}
+
+fn calculate_left_targets() -> Vec<Vec<Vec<usize>>> {
+    let param_b = usize::from(PARAM_B);
+    let param_c = usize::from(PARAM_C);
+
+    (0..=1usize)
+        .map(|parity| {
+            (0..usize::from(PARAM_BC))
+                .map(|r| {
+                    let c = r / param_c;
+
+                    (0..usize::from(PARAM_M))
+                        .map(|m| {
+                            ((c + m) % param_b) * param_c
+                                + (((2 * m + parity) * (2 * m + parity) + r) % param_c)
+                        })
+                        .collect()
+                })
+                .collect()
+        })
+        .collect()
+}
+
+fn calculate_left_target_on_demand(parity: usize, r: usize, m: usize) -> usize {
+    let param_b = usize::from(PARAM_B);
+    let param_c = usize::from(PARAM_C);
+
+    let c = r / param_c;
+
+    ((c + m) % param_b) * param_c + (((2 * m + parity) * (2 * m + parity) + r) % param_c)
+}
+
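The precomputed table and the on-demand formula above compute the same targets; table building uses the former via `find_matches` and verification uses the latter via `num_matches` below. A sketch of that equivalence (illustrative, not part of this diff):

#[test]
fn left_targets_match_on_demand_formula() {
    let left_targets = calculate_left_targets();
    for parity in 0..=1usize {
        for r in 0..usize::from(PARAM_BC) {
            for m in 0..usize::from(PARAM_M) {
                assert_eq!(
                    left_targets[parity][r][m],
                    calculate_left_target_on_demand(parity, r, m)
                );
            }
        }
    }
}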
+/// Caches that can be used to optimize creation of multiple [`Tables`](super::Tables).
+#[derive(Debug)]
+pub struct TablesCache<const K: u8>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    left_bucket: Bucket<K>,
+    right_bucket: Bucket<K>,
+    rmap_scratch: Vec<RmapItem>,
+    left_targets: Vec<Vec<Vec<usize>>>,
+}
+
+impl<const K: u8> Default for TablesCache<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    /// Create new instance
+    fn default() -> Self {
+        // Pair of buckets that are a sliding window of 2 buckets across the whole table
+        let left_bucket = Bucket::default();
+        let right_bucket = Bucket::default();
+
+        let left_targets = calculate_left_targets();
+        // TODO: This is the capacity chiapos allocates it with, check if it is correct
+        let rmap_scratch = Vec::with_capacity(usize::from(PARAM_BC));
+
+        Self {
+            left_bucket,
+            right_bucket,
+            rmap_scratch,
+            left_targets,
+        }
+    }
+}
+
+#[derive(Debug)]
+pub(super) struct Match<const K: u8>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    left_index: usize,
+    left_y: Y<K>,
+    right_index: usize,
+}
+
+#[derive(Debug)]
+struct Bucket<const K: u8>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    /// Bucket index
+    bucket_index: usize,
+    /// `y` values in this bucket
+    ys: Vec<Y<K>>,
+    /// Start position of this bucket in the table
+    start_position: usize,
+}
+
+impl<const K: u8> Default for Bucket<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    fn default() -> Self {
+        Self {
+            bucket_index: 0,
+            // TODO: Currently twice the average size (*2), re-consider size in the future if it is
+            //  typically exceeded
+            ys: Vec::with_capacity(usize::from(PARAM_BC) / (1 << PARAM_EXT) * 2),
+            start_position: 0,
+        }
+    }
+}
+
+#[derive(Debug, Default, Copy, Clone)]
+pub(super) struct RmapItem {
+    count: usize,
+    start_index: usize,
+}
+
+/// The resulting `y` value occupies the first bits of the returned value; `partial_y_offset` is in bits
+pub(super) fn compute_f1<const K: u8>(x: X<K>, partial_y: &[u8], partial_y_offset: usize) -> Y<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    let mut y = Y::default();
+
+    // Copy partial y value derived from ChaCha8 stream
+    y.copy_bits_from(partial_y, partial_y_offset, K, 0_usize);
+    // And `PARAM_EXT` most significant bits from `x`
+    y.copy_bits_from(&x, 0_usize, PARAM_EXT, K);
+
+    y
+}
+
+/// `rmap_scratch` is just an optimization to reuse allocations between calls.
+///
+/// For verification purposes use [`num_matches`] instead.
+///
+/// Returns `None` if either of the buckets is empty.
+fn find_matches<'a, const K: u8>(
+    left_bucket_ys: &'a [Y<K>],
+    right_bucket_ys: &'a [Y<K>],
+    rmap_scratch: &'a mut Vec<RmapItem>,
+    left_targets: &'a [Vec<Vec<usize>>],
+) -> Option<impl Iterator<Item = Match<K>> + 'a>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    // Clear and set to correct size with zero values
+    rmap_scratch.clear();
+    rmap_scratch.resize_with(usize::from(PARAM_BC), RmapItem::default);
+    let rmap = rmap_scratch;
+
+    // Both left and right buckets can be empty
+    let first_left_bucket_y = usize::from(left_bucket_ys.first()?);
+    let first_right_bucket_y = usize::from(right_bucket_ys.first()?);
+    // Since all entries in a bucket share the same quotient of division by `PARAM_BC`, we can
+    // compute the remainder more cheaply by subtracting the base value rather than performing a
+    // full modulo operation
+    let base = (first_right_bucket_y / usize::from(PARAM_BC)) * usize::from(PARAM_BC);
+    for (right_index, y) in right_bucket_ys.iter().enumerate() {
+        let r = usize::from(y) - base;
+
+        // The same `y` (and hence the same `r`) can appear in the table multiple times, in which
+        // case they'll all occupy consecutive slots in `right_bucket` and all we need to store is
+        // just the first position and the number of elements.
+        if rmap[r].count == 0 {
+            rmap[r].start_index = right_index;
+        }
+        rmap[r].count += 1;
+    }
+    let rmap = rmap.as_slice();
+
+    // Same idea as above, but avoids division by leveraging the fact that the left bucket is
+    // exactly `PARAM_BC` below the right bucket in `y` space, so its base is `base - PARAM_BC`
+    let base = base - usize::from(PARAM_BC);
+    let parity = (first_left_bucket_y / usize::from(PARAM_BC)) % 2;
+    let left_targets = &left_targets[parity];
+
+    Some(
+        left_bucket_ys
+            .iter()
+            .enumerate()
+            .flat_map(move |(left_index, y)| {
+                let r = usize::from(y) - base;
+                let left_targets = &left_targets[r];
+
+                (0..usize::from(PARAM_M)).flat_map(move |m| {
+                    let r_target = left_targets[m];
+                    let rmap_item = rmap[r_target];
+
+                    (rmap_item.start_index..)
+                        .take(rmap_item.count)
+                        .map(move |right_index| Match {
+                            left_index,
+                            left_y: *y,
+                            right_index,
+                        })
+                })
+            }),
+    )
+}
+
+/// Simplified version of [`find_matches`] for verification purposes.
+pub(super) fn num_matches<const K: u8>(left_y: &Y<K>, right_y: &Y<K>) -> usize
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    let right_r = usize::from(right_y) % usize::from(PARAM_BC);
+    let parity = (usize::from(left_y) / usize::from(PARAM_BC)) % 2;
+    let left_r = usize::from(left_y) % usize::from(PARAM_BC);
+
+    let mut matches = 0;
+    for m in 0..usize::from(PARAM_M) {
+        let r_target = calculate_left_target_on_demand(parity, left_r, m);
+        if r_target == right_r {
+            matches += 1;
+        }
+    }
+
+    matches
+}
+
+pub(super) fn compute_fn<const K: u8, const TABLE_NUMBER: u8, const PARENT_TABLE_NUMBER: u8>(
+    y: Y<K>,
+    left_metadata: Metadata<K, PARENT_TABLE_NUMBER>,
+    right_metadata: Metadata<K, PARENT_TABLE_NUMBER>,
+) -> (Y<K>, Metadata<K, TABLE_NUMBER>)
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, PARENT_TABLE_NUMBER) }>: Sized,
+    EvaluatableUsize<{ fn_hashing_input_bytes(K) }>: Sized,
+{
+    let hash = {
+        let mut input = [0; fn_hashing_input_bytes(K)];
+
+        input.copy_bits_from(&y, 0_usize, y_size_bits(K), 0_usize);
+        input.copy_bits_from(
+            &left_metadata,
+            0_usize,
+            metadata_size_bits(K, PARENT_TABLE_NUMBER),
+            y_size_bits(K),
+        );
+        input.copy_bits_from(
+            &right_metadata,
+            0_usize,
+            metadata_size_bits(K, PARENT_TABLE_NUMBER),
+            y_size_bits(K) + metadata_size_bits(K, PARENT_TABLE_NUMBER),
+        );
+
+        // Take only bytes where bits were set
+        let num_bytes_with_data = (y_size_bits(K) + metadata_size_bits(K, PARENT_TABLE_NUMBER) * 2)
+            .div_ceil(u8::BITS as usize);
+        blake3::hash(&input[..num_bytes_with_data])
+    };
+    let mut y_output = Y::default();
+    y_output.copy_bits_from(hash.as_bytes(), 0_usize, y_size_bits(K), 0_usize);
+
+    let mut metadata = Metadata::default();
+
+    if TABLE_NUMBER < 4 {
+        metadata.copy_bits_from(
+            &left_metadata,
+            0_usize,
+            metadata_size_bits(K, PARENT_TABLE_NUMBER),
+            0_usize,
+        );
+        metadata.copy_bits_from(
+            &right_metadata,
+            0_usize,
+            metadata_size_bits(K, PARENT_TABLE_NUMBER),
+            metadata_size_bits(K, PARENT_TABLE_NUMBER),
+        );
+    } else if metadata_size_bits(K, TABLE_NUMBER) > 0 {
+        metadata.copy_bits_from(
+            hash.as_bytes(),
+            y_size_bits(K),
+            metadata_size_bits(K, TABLE_NUMBER),
+            0_usize,
+        );
+    }
+
+    (y_output, metadata)
+}
+
+fn match_and_extend_table<'a, const K: u8, const TABLE_NUMBER: u8, const PARENT_TABLE_NUMBER: u8>(
+    last_table: &Table<K, PARENT_TABLE_NUMBER>,
+    left_bucket: &'a Bucket<K>,
+    right_bucket: &'a Bucket<K>,
+    rmap_scratch: &'a mut Vec<RmapItem>,
+    left_targets: &'a [Vec<Vec<usize>>],
+    table_output: &'a mut Vec<(Y<K>, Metadata<K, TABLE_NUMBER>, [Position<K>; 2])>,
+) where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, PARENT_TABLE_NUMBER) }>: Sized,
+    EvaluatableUsize<{ fn_hashing_input_bytes(K) }>: Sized,
+{
+    if let Some(matches) = find_matches::<K>(
+        &left_bucket.ys,
+        &right_bucket.ys,
+        rmap_scratch,
+        left_targets,
+    ) {
+        table_output.extend(matches.map(|m| {
+            let left_position = left_bucket.start_position + m.left_index;
+            let right_position = right_bucket.start_position + m.right_index;
+            let left_metadata = last_table
+                .metadata(left_position)
+                .expect("Position resulted from matching is correct; qed");
+            let right_metadata = last_table
+                .metadata(right_position)
+                .expect("Position resulted from matching is correct; qed");
+
+            let (y, metadata) = compute_fn::<K, TABLE_NUMBER, PARENT_TABLE_NUMBER>(
+                m.left_y,
+                left_metadata,
+                right_metadata,
+            );
+            (
+                y,
+                metadata,
+                [
+                    Position::from(left_position),
+                    Position::from(right_position),
+                ],
+            )
+        }));
+    }
+}
+
+#[derive(Debug)]
+pub(super) enum Table<const K: u8, const TABLE_NUMBER: u8>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+{
+    /// First table with contents of entries split into separate vectors for more efficient access
+    First {
+        /// Derived values computed from `x`
+        ys: Vec<Y<K>>,
+        /// X values
+        xs: Vec<X<K>>,
+    },
+    /// Other tables
+    Other {
+        /// Derived values computed from previous table
+        ys: Vec<Y<K>>,
+        /// Left and right entry positions in a previous table encoded into bits
+        positions: Vec<[Position<K>; 2]>,
+        /// Metadata corresponding to each entry
+        metadatas: Vec<Metadata<K, TABLE_NUMBER>>,
+    },
+}
+
+impl<const K: u8> Table<K, 1>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 1) }>: Sized,
+{
+    /// Create the table
+    pub(super) fn create(seed: Seed) -> Self {
+        let partial_ys = partial_ys::<K>(seed);
+
+        let mut t_1 = (0..1 << K)
+            .map(|x| {
+                let partial_y_offset = x * usize::from(K);
+                let x = X::from(x);
+                let y = compute_f1::<K>(x, &partial_ys, partial_y_offset);
+
+                (y, x)
+            })
+            .collect::<Vec<_>>();
+
+        t_1.sort_unstable_by_key(|(y, ..)| *y);
+
+        let (ys, xs) = t_1.into_iter().unzip();
+
+        Self::First { ys, xs }
+    }
+
+    /// All `x`s from the first table, each stored as a [`K`]-bit [`X`] value
+    pub(super) fn xs(&self) -> &[X<K>] {
+        match self {
+            Table::First { xs, .. } => xs,
+            _ => {
+                unreachable!()
+            }
+        }
+    }
+}
+
+mod private {
+    pub(in super::super) trait SupportedOtherTables {}
+}
+
+impl<const K: u8> private::SupportedOtherTables for Table<K, 2>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 2) }>: Sized,
+{
+}
+
+impl<const K: u8> private::SupportedOtherTables for Table<K, 3>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 3) }>: Sized,
+{
+}
+
+impl<const K: u8> private::SupportedOtherTables for Table<K, 4>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 4) }>: Sized,
+{
+}
+
+impl<const K: u8> private::SupportedOtherTables for Table<K, 5>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 5) }>: Sized,
+{
+}
+
+impl<const K: u8> private::SupportedOtherTables for Table<K, 6>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 6) }>: Sized,
+{
+}
+
+impl<const K: u8> private::SupportedOtherTables for Table<K, 7>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 7) }>: Sized,
+{
+}
+
+impl<const K: u8, const TABLE_NUMBER: u8> Table<K, TABLE_NUMBER>
+where
+    Self: private::SupportedOtherTables,
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+    EvaluatableUsize<{ fn_hashing_input_bytes(K) }>: Sized,
+{
+    pub(super) fn create<const PARENT_TABLE_NUMBER: u8>(
+        last_table: &Table<K, PARENT_TABLE_NUMBER>,
+        cache: &mut TablesCache<K>,
+    ) -> Self
+    where
+        EvaluatableUsize<{ metadata_size_bytes(K, PARENT_TABLE_NUMBER) }>: Sized,
+    {
+        let left_bucket = &mut cache.left_bucket;
+        let right_bucket = &mut cache.right_bucket;
+        let rmap_scratch = &mut cache.rmap_scratch;
+        let left_targets = &cache.left_targets;
+
+        // Clear input variables just in case
+        left_bucket.bucket_index = 0;
+        left_bucket.ys.clear();
+        left_bucket.start_position = 0;
+        right_bucket.bucket_index = 1;
+        right_bucket.ys.clear();
+        right_bucket.start_position = 0;
+
+        let num_values = 1 << K;
+        let mut t_n = Vec::with_capacity(num_values);
+        // TODO: Parallelize something here?
+        // TODO: [unstable] `group_by` can be used here:
+        //  https://doc.rust-lang.org/std/primitive.slice.html#method.group_by
+        for (position, &y) in last_table.ys().iter().enumerate() {
+            let bucket_index = usize::from(&y) / usize::from(PARAM_BC);
+
+            if bucket_index == left_bucket.bucket_index {
+                left_bucket.ys.push(y);
+                continue;
+            } else if bucket_index == right_bucket.bucket_index {
+                if right_bucket.ys.is_empty() {
+                    right_bucket.start_position = position;
+                }
+                right_bucket.ys.push(y);
+                continue;
+            }
+
+            match_and_extend_table(
+                last_table,
+                left_bucket,
+                right_bucket,
+                rmap_scratch,
+                left_targets,
+                &mut t_n,
+            );
+
+            if bucket_index == right_bucket.bucket_index + 1 {
+                // Move right bucket into left bucket while reusing existing allocations
+                mem::swap(left_bucket, right_bucket);
+                right_bucket.bucket_index = bucket_index;
+                right_bucket.ys.clear();
+                right_bucket.start_position = position;
+
+                right_bucket.ys.push(y);
+            } else {
+                // We have skipped some buckets, clean up both left and right buckets
+                left_bucket.bucket_index = bucket_index;
+                left_bucket.ys.clear();
+                left_bucket.start_position = position;
+
+                left_bucket.ys.push(y);
+
+                right_bucket.bucket_index = bucket_index + 1;
+                right_bucket.ys.clear();
+            }
+        }
+        // Iteration stopped, but we did not process contents of the last pair of buckets yet
+        match_and_extend_table(
+            last_table,
+            left_bucket,
+            right_bucket,
+            rmap_scratch,
+            left_targets,
+            &mut t_n,
+        );
+
+        t_n.sort_unstable_by_key(|(y, ..)| *y);
+
+        let mut ys = Vec::with_capacity(num_values);
+        let mut positions = Vec::with_capacity(num_values);
+        let mut metadatas = Vec::with_capacity(num_values);
+
+        for (y, metadata, [left_position, right_position]) in t_n {
+            ys.push(y);
+            positions.push([left_position, right_position]);
+            if TABLE_NUMBER != 7 {
+                metadatas.push(metadata);
+            }
+        }
+
+        Self::Other {
+            ys,
+            positions,
+            metadatas,
+        }
+    }
+}
+
+impl<const K: u8, const TABLE_NUMBER: u8> Table<K, TABLE_NUMBER>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+{
+    /// All `y`s from this table, each stored as a [`Y`] value of [`K`] + `PARAM_EXT` bits
+    pub(super) fn ys(&self) -> &[Y<K>] {
+        let (Table::First { ys, .. } | Table::Other { ys, .. }) = self;
+        ys
+    }
+
+    /// Returns `None` for an invalid position or for the first table, otherwise returns
+    /// `Some([left_position, right_position])` pointing into the previous table
+    pub(super) fn position(&self, position: Position<K>) -> Option<[Position<K>; 2]> {
+        match self {
+            Table::First { .. } => None,
+            Table::Other { positions, .. } => positions.get(usize::from(position)).copied(),
+        }
+    }
+
+    /// Returns `None` on invalid position or for table number 7
+    pub(super) fn metadata(&self, position: usize) -> Option<Metadata<K, TABLE_NUMBER>> {
+        match self {
+            Table::First { xs, .. } => {
+                // This is a bit awkward since we store `K` bits in each `x`, while
+                // `metadata_size_bits` defines the number of bits metadata has for table 1. The
+                // two are the same, and slicing here would only add overhead, so assert the
+                // equality instead; the assertion is optimized away by the compiler and incurs no
+                // runtime cost.
+                assert_eq!(metadata_size_bits(K, TABLE_NUMBER), usize::from(K));
+
+                xs.get(position).map(|x| {
+                    let mut metadata = Metadata::default();
+                    metadata.copy_bits_from(x, 0_usize, K, 0_usize);
+                    metadata
+                })
+            }
+            Table::Other { metadatas, .. } => metadatas.get(position).copied(),
+        }
+    }
+}
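For orientation, a sketch of how the pieces in this module chain together (illustrative, not part of this diff; the real chaining lives in `tables.rs`, which is not shown in this excerpt, and `build_first_three_tables` is a hypothetical helper with K fixed to 17 as in `chia.rs`):

use crate::chiapos::table::{Table, TablesCache};
use crate::chiapos::Seed;

fn build_first_three_tables(seed: Seed) -> Table<17, 3> {
    // Table 1 is derived directly from the seed via ChaCha8 (`compute_f1`)
    let mut cache = TablesCache::<17>::default();
    let table_1 = Table::<17, 1>::create(seed);
    // Each later table is produced by bucket matching against the previous one (`compute_fn`),
    // reusing the same cache allocations for every pass
    let table_2 = Table::<17, 2>::create::<1>(&table_1, &mut cache);
    Table::<17, 3>::create::<2>(&table_2, &mut cache)
}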
diff --git a/crates/subspace-proof-of-space/src/chiapos/table/tests.rs b/crates/subspace-proof-of-space/src/chiapos/table/tests.rs
new file mode 100644
index 00000000000..b6eacb85d11
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos/table/tests.rs
@@ -0,0 +1,255 @@
+//! Tests translated into Rust from
+//! https://github.com/Chia-Network/chiapos/blob/a2049c5367fe60930533a995f7ffded538f04dc4/tests/test.cpp
+
+use crate::chiapos::constants::{PARAM_B, PARAM_BC, PARAM_C, PARAM_EXT};
+use crate::chiapos::table::types::{Metadata, X, Y};
+use crate::chiapos::table::{
+    calculate_left_targets, compute_f1, compute_fn, find_matches, fn_hashing_input_bytes,
+    metadata_size_bytes, partial_y, y_size_bytes,
+};
+use crate::chiapos::utils::EvaluatableUsize;
+use crate::chiapos::Seed;
+use std::collections::BTreeMap;
+
+/// Chia does this for some reason 🤷‍
+fn to_chia_seed(seed: &Seed) -> Seed {
+    let mut chia_seed = [1u8; 32];
+    chia_seed[1..].copy_from_slice(&seed[..31]);
+    chia_seed
+}
+
+#[cfg(target_pointer_width = "64")]
+#[test]
+fn test_compute_f1_k35() {
+    const K: u8 = 35;
+    let seed = to_chia_seed(&[
+        0, 2, 3, 4, 5, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 41, 5, 6, 7, 8, 9, 10, 11,
+        12, 13, 11, 15, 16,
+    ]);
+
+    let xs = [525, 526, 625_usize];
+    let expected_ys = [948_868_477_184, 2_100_559_512_384, 1_455_233_158_208_usize];
+
+    for (x, expected_y) in xs.into_iter().zip(expected_ys) {
+        let (partial_y, partial_y_offset) = partial_y::<K>(seed, x);
+        let y = compute_f1::<K>(X::from(x), &partial_y, partial_y_offset);
+        let y = usize::from(&y);
+        assert_eq!(y, expected_y);
+    }
+}
+
+#[cfg(target_pointer_width = "64")]
+#[test]
+fn test_compute_f1_k32() {
+    const K: u8 = 32;
+    let seed = to_chia_seed(&[
+        0, 2, 3, 4, 5, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 41, 5, 6, 7, 8, 9, 10, 11,
+        12, 13, 11, 15, 16,
+    ]);
+
+    let xs = [
+        192_837_491,
+        192_837_491 + 1,
+        192_837_491 + 2,
+        192_837_491 + 255_usize,
+    ];
+    let expected_ys = [
+        206_843_700_930,
+        32_315_542_210,
+        156_034_446_146,
+        128_694_732_738_usize,
+    ];
+
+    for (x, expected_y) in xs.into_iter().zip(expected_ys) {
+        let (partial_y, partial_y_offset) = partial_y::<K>(seed, x);
+        let y = compute_f1::<K>(X::from(x), &partial_y, partial_y_offset);
+        let y = usize::from(&y);
+        assert_eq!(y, expected_y);
+    }
+}
+
+fn check_match(yl: usize, yr: usize) -> bool {
+    let yl = yl as i64;
+    let yr = yr as i64;
+    let param_b = i64::from(PARAM_B);
+    let param_c = i64::from(PARAM_C);
+    let param_bc = i64::from(PARAM_BC);
+    let bl = yl / param_bc;
+    let br = yr / param_bc;
+    if bl + 1 != br {
+        // Buckets don't match
+        return false;
+    }
+    for m in 0..(1 << PARAM_EXT) {
+        if (((yr % param_bc) / param_c - (yl % param_bc) / param_c) - m) % param_b == 0 {
+            let mut c_diff = 2 * m + bl % 2;
+            c_diff *= c_diff;
+
+            if (((yr % param_bc) % param_c - (yl % param_bc) % param_c) - c_diff) % param_c == 0 {
+                return true;
+            }
+        }
+    }
+
+    false
+}
+
+// TODO: This test should be rewritten into something more readable, currently it is more or less
+//  direct translation from C++
+#[test]
+fn test_matches() {
+    const K: u8 = 12;
+    let seed = to_chia_seed(&[
+        20, 2, 5, 4, 51, 52, 23, 84, 91, 10, 111, 12, 13, 24, 151, 16, 228, 211, 254, 45, 92, 198,
+        204, 10, 9, 10, 11, 129, 139, 171, 15, 18,
+    ]);
+
+    let mut buckets = BTreeMap::<usize, Vec<_>>::new();
+    let mut x = 0;
+    for _ in 0..=1 << (K - 4) {
+        for _ in 0..16 {
+            let (partial_y, partial_y_offset) = partial_y::<K>(seed, x);
+            let y = compute_f1::<K>(X::from(x), &partial_y, partial_y_offset);
+            let bucket_index = usize::from(&y) / usize::from(PARAM_BC);
+
+            buckets.entry(bucket_index).or_default().push(y);
+
+            if x + 1 > (1 << K) - 1 {
+                break;
+            }
+
+            x += 1;
+        }
+
+        if x + 1 > (1 << K) - 1 {
+            break;
+        }
+    }
+
+    let left_targets = calculate_left_targets();
+    let mut rmap_scratch = Vec::new();
+    let buckets = buckets.into_values().collect::<Vec<_>>();
+    let mut total_matches = 0_usize;
+    for [mut left_bucket, mut right_bucket] in buckets.array_windows::<2>().cloned() {
+        left_bucket.sort_unstable();
+        left_bucket.reverse();
+        right_bucket.sort_unstable();
+        right_bucket.reverse();
+
+        let matches = find_matches::<K>(
+            &left_bucket,
+            &right_bucket,
+            &mut rmap_scratch,
+            &left_targets,
+        );
+        for m in matches.unwrap() {
+            let yl = usize::from(left_bucket.get(m.left_index).unwrap());
+            let yr = usize::from(right_bucket.get(m.right_index).unwrap());
+
+            assert!(check_match(yl, yr));
+            total_matches += 1;
+        }
+    }
+
+    assert!(
+        total_matches > (1 << K) / 2,
+        "total_matches {total_matches}"
+    );
+    assert!(
+        total_matches < (1 << K) * 2,
+        "total_matches {total_matches}"
+    );
+}
+
+fn verify_fn<const K: u8, const TABLE_NUMBER: u8, const PARENT_TABLE_NUMBER: u8>(
+    left_metadata: usize,
+    right_metadata: usize,
+    y: usize,
+    y_output_expected: usize,
+    metadata_expected: usize,
+) where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, PARENT_TABLE_NUMBER) }>: Sized,
+    EvaluatableUsize<{ fn_hashing_input_bytes(K) }>: Sized,
+{
+    let (y_output, metadata) = compute_fn::<K, TABLE_NUMBER, PARENT_TABLE_NUMBER>(
+        Y::<K>::from(y),
+        Metadata::<K, PARENT_TABLE_NUMBER>::from(left_metadata),
+        Metadata::<K, PARENT_TABLE_NUMBER>::from(right_metadata),
+    );
+    let y_output = usize::from(&y_output);
+    assert_eq!(y_output, y_output_expected);
+    if metadata_expected != 0 {
+        assert_eq!(
+            metadata,
+            Metadata::<K, TABLE_NUMBER>::from(metadata_expected)
+        );
+    }
+}
+
+#[test]
+fn test_verify_fn() {
+    const K: u8 = 16;
+
+    verify_fn::<K, 2, 1>(0x44cb, 0x204f, 0x20a61a, 0x2af546, 0x44cb204f);
+    verify_fn::<K, 2, 1>(0x3c5f, 0xfda9, 0x3988ec, 0x15293b, 0x3c5ffda9);
+    verify_fn::<K, 3, 2>(
+        0x35bf992d,
+        0x7ce42c82,
+        0x31e541,
+        0xf73b3,
+        0x35bf992d7ce42c82,
+    );
+    verify_fn::<K, 3, 2>(
+        0x7204e52d,
+        0xf1fd42a2,
+        0x28a188,
+        0x3fb0b5,
+        0x7204e52df1fd42a2,
+    );
+    verify_fn::<K, 4, 3>(
+        0x5b6e6e307d4bedc,
+        0x8a9a021ea648a7dd,
+        0x30cb4c,
+        0x11ad5,
+        0xd4bd0b144fc26138,
+    );
+    verify_fn::<K, 4, 3>(
+        0xb9d179e06c0fd4f5,
+        0xf06d3fef701966a0,
+        0x1dd5b6,
+        0xe69a2,
+        0xd02115f512009d4d,
+    );
+    verify_fn::<K, 5, 4>(
+        0xc2cd789a380208a9,
+        0x19999e3fa46d6753,
+        0x25f01e,
+        0x1f22bd,
+        0xabe423040a33,
+    );
+    verify_fn::<K, 5, 4>(
+        0xbe3edc0a1ef2a4f0,
+        0x4da98f1d3099fdf5,
+        0x3feb18,
+        0x31501e,
+        0x7300a3a03ac5,
+    );
+    verify_fn::<K, 6, 5>(
+        0xc965815a47c5,
+        0xf5e008d6af57,
+        0x1f121a,
+        0x1cabbe,
+        0xc8cc6947,
+    );
+    verify_fn::<K, 6, 5>(
+        0xd420677f6cbd,
+        0x5894aa2ca1af,
+        0x2efde9,
+        0xc2121,
+        0x421bb8ec,
+    );
+    verify_fn::<K, 7, 6>(0x5fec898f, 0x82283d15, 0x14f410, 0x24c3c2, 0x0);
+    verify_fn::<K, 7, 6>(0x64ac5db9, 0x7923986, 0x590fd, 0x1c74a2, 0x0);
+}
diff --git a/crates/subspace-proof-of-space/src/chiapos/table/types.rs b/crates/subspace-proof-of-space/src/chiapos/table/types.rs
new file mode 100644
index 00000000000..fe8c63d9d21
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos/table/types.rs
@@ -0,0 +1,759 @@
+use crate::chiapos::table::{
+    metadata_size_bits, metadata_size_bytes, x_size_bytes, y_size_bits, y_size_bytes,
+};
+use crate::chiapos::utils::EvaluatableUsize;
+use bitvec::prelude::*;
+use core::ops::Deref;
+use core::{fmt, mem};
+use std::cmp::Ordering;
+
+/// Copy `size` bits from `source` starting at `source_offset` into `destination` at
+/// `destination_offset`
+///
+/// ## Panics
+/// Panics if `source_offset > 7` or `destination_offset > 7`.
+/// Panics if `source_offset + size > source.len() * u8::BITS` or
+/// `destination_offset + size > destination.len() * u8::BITS`.
+// TODO: Should benefit from SIMD instructions
+// Inlining helps compiler remove most of the logic in this function
+#[inline(always)]
+#[track_caller]
+fn copy_bits(
+    source: &[u8],
+    source_offset: usize,
+    destination: &mut [u8],
+    destination_offset: usize,
+    size: usize,
+) {
+    const BYTE_SIZE: usize = u8::BITS as usize;
+
+    // TODO: Make it skip bytes automatically
+    assert!(source_offset <= 7, "source_offset {source_offset} > 7");
+    assert!(destination_offset <= 7, "source_offset {source_offset} > 7");
+    // Source length in bytes
+    let source_len = source.len();
+    // Destination length in bytes
+    let destination_len = destination.len();
+    assert!(
+        source_offset + size <= source_len * BYTE_SIZE,
+        "source_offset {source_offset} + size {size} > source.len() {source_len} * BYTE_SIZE {BYTE_SIZE}",
+    );
+    assert!(
+        destination_offset + size <= destination_len * BYTE_SIZE,
+        "destination_offset {destination_offset} + size {size} > destination.len() {destination_len} * BYTE_SIZE {BYTE_SIZE}",
+    );
+
+    // This number of bits in the last destination byte will be composed from source bits
+    let last_byte_bits_from_source = {
+        let last_byte_bits_from_source = (destination_offset + size) % BYTE_SIZE;
+        if last_byte_bits_from_source == 0 {
+            BYTE_SIZE
+        } else {
+            last_byte_bits_from_source
+        }
+    };
+
+    // Copy and shift bits left or right to match the desired `destination_offset`
+    match destination_offset.cmp(&source_offset) {
+        // Strategy in case we shift bits left
+        //
+        // We start with the first byte, which we compose into its final form from its original
+        // bits that do not need to be updated and the bits from the source that need to be moved
+        // into it.
+        //
+        // The observation here is that the source is potentially one byte longer than the
+        // destination, so after processing the first byte separately we can keep iterating over
+        // source bytes offset by one byte to the right of the corresponding destination byte,
+        // which lets us move forward without touching previously written destination bytes.
+        //
+        // If the source and destination lengths are not the same, we skip the very last iteration
+        // of the general loop. This is because the last destination byte might have bits that need
+        // to be preserved and we don't want to read the destination unnecessarily here, so we do
+        // another pass afterwards with those destination bits preserved.
+        //
+        // If the source and destination lengths are the same, we iterate all the way to the end
+        // (but we still skip one destination byte because source and destination bytes are
+        // iterated with an offset to each other). After the iteration the only thing left is to
+        // take the accumulator and apply it to the last destination byte (again, the only byte we
+        // actually need to read).
+        Ordering::Less => {
+            // Offset between source and destination
+            let offset = source_offset - destination_offset;
+            // Preserve first bits from destination that must not be changed in accumulator
+            let mut left_acc = if destination_offset == 0 {
+                // Byte will be fully overridden by the source
+                0
+            } else {
+                destination[0] & (u8::MAX << (BYTE_SIZE - destination_offset))
+            };
+            // Add bits from the first source byte to the accumulator
+            left_acc |= (source[0] << offset) & (u8::MAX >> destination_offset);
+
+            // Compose destination bytes, skip the first source byte since we have already processed
+            // it above.
+            //
+            // Note that on every step source byte is to the right of the destination, skip last
+            // pair such that we can preserve trailing bits of the destination unchanged
+            // (this is an optimization, it allows us to not read `destination` in this loop at all)
+            for (source, destination) in source[1..]
+                .iter()
+                .zip(destination.iter_mut())
+                .rev()
+                .skip(if source_len != destination_len { 1 } else { 0 })
+                .rev()
+            {
+                // Take bits that were moved out of the byte boundary and add the left side from
+                // the accumulator
+                *destination = left_acc | (*source >> (BYTE_SIZE - offset));
+                // Store left side of the source bits in the accumulator that will be applied to the
+                // next byte
+                left_acc = *source << offset;
+            }
+
+            // Clear bits in accumulator that must not be copied into destination
+            let left_acc = left_acc & (u8::MAX << (BYTE_SIZE - last_byte_bits_from_source));
+
+            if source_len != destination_len {
+                if let Some((source, destination)) =
+                    source[1..].iter().zip(destination.iter_mut()).last()
+                {
+                    let preserved_bits = if last_byte_bits_from_source == BYTE_SIZE {
+                        // Byte will be fully overridden by the source
+                        0
+                    } else {
+                        *destination & (u8::MAX >> last_byte_bits_from_source)
+                    };
+                    // Take bits that were moved out of the byte boundary
+                    let source_bits = (*source >> (BYTE_SIZE - offset))
+                        & (u8::MAX << (BYTE_SIZE - last_byte_bits_from_source));
+                    // Combine last accumulator bits (left most bits) with source bits and preserved
+                    // bits in destination
+                    *destination = left_acc | source_bits | preserved_bits;
+                }
+            } else {
+                // Shift source bits to the right and remove trailing bits that we'll get from
+                // destination, this is the middle part of the last destination byte
+                let source_bits = (source[source_len - 1] >> (BYTE_SIZE - offset))
+                    & (u8::MAX << (BYTE_SIZE - last_byte_bits_from_source));
+                // Bits preserved in destination
+                let preserved_bits = if last_byte_bits_from_source == BYTE_SIZE {
+                    // Byte will be fully overridden by the source
+                    0
+                } else {
+                    destination[destination_len - 1] & (u8::MAX >> last_byte_bits_from_source)
+                };
+                // Combine last accumulator bits (left most bits) with source bits and preserved
+                // bits in destination
+                destination[destination_len - 1] = left_acc | source_bits | preserved_bits;
+            }
+        }
+        // Strategy here is much simpler: copy first and last bytes while accounting for bits that
+        // should be preserved in destination, otherwise do bulk copy
+        Ordering::Equal => {
+            if destination_offset > 0 {
+                // Clear bits of the first byte that will be overridden by bits from source
+                destination[0] &= u8::MAX << ((BYTE_SIZE - destination_offset) % BYTE_SIZE);
+                // Add bits to the first byte from source
+                destination[0] |= source[0] & (u8::MAX >> destination_offset);
+            } else {
+                destination[0] = source[0];
+            }
+
+            // Copy some bytes in bulk
+            if destination_len > 2 {
+                // Copy everything except first and last bytes
+                destination[1..destination_len - 1]
+                    .copy_from_slice(&source[1..destination_len - 1]);
+            }
+
+            if last_byte_bits_from_source == BYTE_SIZE {
+                destination[destination_len - 1] = source[destination_len - 1];
+            } else {
+                // Clear bits of the last byte that will be overridden by bits from source
+                destination[destination_len - 1] &= u8::MAX >> last_byte_bits_from_source;
+                // Add bits to the last byte from source
+                destination[destination_len - 1] |= source[destination_len - 1]
+                    & (u8::MAX << (BYTE_SIZE - last_byte_bits_from_source));
+            }
+        }
+        // Strategy in case we shift bits right
+        //
+        // We start with the first byte, which we compose into its final form from its original
+        // bits that do not need to be updated and the bits from the source that need to be moved
+        // into it.
+        //
+        // Here the destination is potentially one byte longer than the source and we still need to
+        // preserve the last destination bits that should not be modified.
+        //
+        // If the source and destination lengths are the same, we skip the very last iteration.
+        // That is because it might have bits that need to be preserved and we don't want to read
+        // the destination unnecessarily here, so we do another pass afterwards with those
+        // destination bits preserved.
+        //
+        // If the source and destination lengths are not the same, we iterate all the way to the
+        // end. After the iteration the only thing left is to take the accumulator and apply it to
+        // the last destination byte (again, the only byte we actually need to read).
+        Ordering::Greater => {
+            // Offset between source and destination
+            let offset = destination_offset - source_offset;
+            {
+                // Bits preserved in destination
+                let preserved_bits = destination[0] & (u8::MAX << (BYTE_SIZE - destination_offset));
+                // Source bits that will be stored in the first byte
+                let source_bits = (source[0] >> offset) & (u8::MAX >> destination_offset);
+                // Combine preserved bits and source bits into the first destination byte
+                destination[0] = preserved_bits | source_bits;
+            }
+            // Store the bits from the first source byte that didn't fit into the first
+            // destination byte into the accumulator
+            let mut left_acc = (source[0] & (u8::MAX >> source_offset)) << (BYTE_SIZE - offset);
+
+            // Compose destination bytes, skipping the first pair since we have already processed
+            // it above.
+            //
+            // Note that we also skip the last pair when source and destination have the same
+            // length, so that we can preserve the trailing bits of the destination unchanged
+            // (this is an optimization: it allows us to not read `destination` in this loop at
+            // all)
+            for (source, destination) in source
+                .iter()
+                .zip(destination.iter_mut())
+                .skip(1)
+                .rev()
+                .skip(if source_len == destination_len { 1 } else { 0 })
+                .rev()
+            {
+                // Shift source bits to the right and add the left side from accumulator
+                *destination = left_acc | (*source >> offset);
+                // Take bits that were moved out of the boundary into accumulator that will be
+                // applied to the next byte
+                left_acc = *source << (BYTE_SIZE - offset);
+            }
+
+            // Clear bits in accumulator that must not be copied into destination
+            let left_acc = left_acc & (u8::MAX << (BYTE_SIZE - last_byte_bits_from_source));
+
+            if source_len == destination_len {
+                // In case we skipped last pair above, process it here
+                if let Some((source, destination)) =
+                    source.iter().zip(destination.iter_mut()).skip(1).last()
+                {
+                    let preserved_bits = if last_byte_bits_from_source == BYTE_SIZE {
+                        // Byte will be fully overridden by the source
+                        0
+                    } else {
+                        *destination & (u8::MAX >> last_byte_bits_from_source)
+                    };
+                    // Shift source bits to the right and clear bits that correspond to preserved
+                    // bits
+                    let source_bits =
+                        (*source >> offset) & (u8::MAX << (BYTE_SIZE - last_byte_bits_from_source));
+                    // Combine last accumulator bits (left most bits) with source bits and preserved
+                    // bits in destination
+                    *destination = left_acc | source_bits | preserved_bits;
+                }
+            } else {
+                // Bits preserved in destination
+                let preserved_bits =
+                    destination[destination_len - 1] & (u8::MAX >> last_byte_bits_from_source);
+                // Combine last accumulator bits (left most bits) with preserved bits in destination
+                destination[destination_len - 1] = left_acc | preserved_bits;
+            }
+        }
+    }
+}
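+
+// A small worked example of the behavior above (values chosen purely for illustration, bits are
+// numbered MSB-first within each byte, matching the `Msb0` view used elsewhere):
+//
+//   source      = [0b1011_0111], source offset      = 2
+//   destination = [0b1100_0001], destination offset = 4, size = 3
+//
+// The 3 source bits starting at offset 2 are `110`; they are written into destination bits 4..7
+// while all other destination bits are preserved, so the destination byte becomes 0b1100_1101.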
+
+/// Container with data source and information about contents
+pub(in super::super) struct CopyBitsSourceData<'a> {
+    pub(in super::super) bytes: &'a [u8],
+    /// Where do bits of useful data start
+    pub(in super::super) bit_offset: usize,
+    /// Size in bits
+    pub(in super::super) bit_size: usize,
+}
+
+pub(in super::super) trait CopyBitsSource {
+    fn data(&self) -> CopyBitsSourceData<'_>;
+}
+
+pub(in super::super) trait CopyBitsDestination {
+    /// Where do bits of useful data start
+    const DATA_OFFSET: usize;
+
+    /// Underlying data container
+    fn data_mut(&mut self) -> &mut [u8];
+
+    /// Size in bits
+    fn bits(&self) -> usize;
+
+    /// Copy `size` bits from [`Source`], starting at `source_offset`, and write them at
+    /// `destination_offset` into this data structure. The contents of bits after
+    /// `destination_offset + size` are not defined.
+    ///
+    /// ## Panics
+    /// Panics if `source_offset + size` is more bits than [`Source`] has or
+    /// `destination_offset + size` is more bits than [`Self::bits()`]. Higher level code must
+    /// ensure this never happens; this method is not exposed outside of this crate and crate
+    /// boundaries are supposed to protect this invariant. Exposing error handling from here would
+    /// be too noisy and didn't seem worth it.
+    // Inlining helps compiler remove most of the logic in this function
+    #[inline(always)]
+    fn copy_bits_from<Source, SourceOffset, Size, DestinationOffset>(
+        &mut self,
+        source: &Source,
+        source_offset: SourceOffset,
+        size: Size,
+        destination_offset: DestinationOffset,
+    ) where
+        Source: CopyBitsSource + ?Sized,
+        usize: From<SourceOffset>,
+        usize: From<Size>,
+        usize: From<DestinationOffset>,
+    {
+        let source_offset = usize::from(source_offset);
+        let size = usize::from(size);
+        let destination_offset = usize::from(destination_offset);
+        let source_data = source.data();
+
+        assert!(source_offset + size <= source_data.bit_size);
+        assert!(destination_offset + size <= self.bits());
+
+        // Which byte to start reading bytes at, taking into account where actual data bits start
+        // and desired offset
+        let read_byte_offset = (source_data.bit_offset + source_offset) / u8::BITS as usize;
+        // Which bit in read bytes is the first one we care about
+        let read_bit_offset = (source_data.bit_offset + source_offset) % u8::BITS as usize;
+        // How many bytes to read from source, while taking into account the fact desired source
+        // offset
+        let bytes_to_read = (read_bit_offset + size).div_ceil(u8::BITS as usize);
+        // Source bytes at which we have `SIZE` bits of data starting at `read_bit_offset` bit
+        let source_bytes = &source_data.bytes[read_byte_offset..][..bytes_to_read];
+
+        // Which byte to start writing bytes at, taking into account where actual data bits start
+        // and desired offset
+        let write_byte_offset = (Self::DATA_OFFSET + destination_offset) / u8::BITS as usize;
+        // Which bit in destination is the first bit at which source should actually be written,
+        // bits before that must be preserved
+        let write_bit_offset = (Self::DATA_OFFSET + destination_offset) % u8::BITS as usize;
+        // How many bytes to write into destination, while taking into account the fact desired
+        // destination offset
+        let bytes_to_write = (write_bit_offset + size).div_ceil(u8::BITS as usize);
+        // Destination bytes at which we have `SIZE` bits to write starting at `write_bit_offset`
+        let destination_bytes = &mut self.data_mut()[write_byte_offset..][..bytes_to_write];
+
+        // Defensive checks in debug builds until we have robust tests for bit copying
+        #[cfg(debug_assertions)]
+        let first_destination_bits =
+            destination_bytes.view_bits::<Msb0>()[..write_bit_offset].to_bitvec();
+        #[cfg(debug_assertions)]
+        let last_destination_bits =
+            destination_bytes.view_bits::<Msb0>()[write_bit_offset + size..].to_bitvec();
+        copy_bits(
+            source_bytes,
+            read_bit_offset,
+            destination_bytes,
+            write_bit_offset,
+            size,
+        );
+        #[cfg(debug_assertions)]
+        assert_eq!(
+            first_destination_bits,
+            destination_bytes.view_bits::<Msb0>()[..write_bit_offset].to_bitvec(),
+            "Implementation bug in subspace-proof-of-space bit copy, please report to Nazar \
+            immediately with reproduction steps"
+        );
+        #[cfg(debug_assertions)]
+        assert_eq!(
+            destination_bytes.view_bits::<Msb0>()[write_bit_offset..][..size].to_bitvec(),
+            source_bytes.view_bits::<Msb0>()[read_bit_offset..][..size].to_bitvec(),
+            "Implementation bug in subspace-proof-of-space bit copy, please report to Nazar \
+            immediately with reproduction steps"
+        );
+        #[cfg(debug_assertions)]
+        assert_eq!(
+            last_destination_bits,
+            destination_bytes.view_bits::<Msb0>()[write_bit_offset + size..].to_bitvec(),
+            "Implementation bug in subspace-proof-of-space bit copy, please report to Nazar \
+            immediately with reproduction steps"
+        );
+    }
+}
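+
+// Usage sketch (illustrative only): thanks to the blanket impls below, any `[u8]`-backed value
+// can act as both source and destination, e.g.
+//
+//     let source = [0xABu8, 0xCD];
+//     let mut buffer = [0u8; 4];
+//     // Copy the first 10 bits of `source` into `buffer`, starting at bit 6 of the buffer
+//     buffer.copy_bits_from(&source, 0_usize, 10_usize, 6_usize);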
+
+impl<T> CopyBitsSource for T
+where
+    T: AsRef<[u8]> + ?Sized,
+{
+    fn data(&self) -> CopyBitsSourceData<'_> {
+        CopyBitsSourceData {
+            bytes: self.as_ref(),
+            bit_offset: 0,
+            bit_size: self.as_ref().len() * u8::BITS as usize,
+        }
+    }
+}
+
+impl<T> CopyBitsDestination for T
+where
+    T: AsRef<[u8]> + AsMut<[u8]> + ?Sized,
+{
+    const DATA_OFFSET: usize = 0;
+
+    fn data_mut(&mut self) -> &mut [u8] {
+        self.as_mut()
+    }
+
+    fn bits(&self) -> usize {
+        self.as_ref().len() * u8::BITS as usize
+    }
+}
+
+/// Wrapper data structure around bits of `x` values; stores data in the last bits of the internal array
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+#[repr(transparent)]
+pub(in super::super) struct X<const K: u8>([u8; x_size_bytes(K)])
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized;
+
+impl<const K: u8> Default for X<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    fn default() -> Self {
+        Self([0; x_size_bytes(K)])
+    }
+}
+
+impl<const K: u8> fmt::Debug for X<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("X")
+            .field(&&self.0.view_bits::<Msb0>()[Self::BYTES * u8::BITS as usize - Self::BITS..])
+            .finish()
+    }
+}
+
+impl<const K: u8> From<usize> for X<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    /// Will silently drop data if [`K`] is too small to hold the useful bits of the [`usize`]
+    /// value. Higher level code must ensure this never happens; this method is not exposed outside
+    /// of this crate and crate boundaries are supposed to protect this invariant. Exposing error
+    /// handling from here would be too noisy and didn't seem worth it.
+    fn from(value: usize) -> Self {
+        let mut output = [0; x_size_bytes(K)];
+        // Copy last bytes
+        output.copy_from_slice(&value.to_be_bytes()[mem::size_of::<usize>() - Self::BYTES..]);
+        Self(output)
+    }
+}
+
+impl<const K: u8> From<&X<K>> for usize
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    /// Panics if the conversion to [`usize`] fails. Higher level code must ensure this never
+    /// happens; this method is not exposed outside of this crate and crate boundaries are supposed
+    /// to protect this invariant. Exposing error handling from here would be too noisy and didn't
+    /// seem worth it.
+    fn from(value: &X<K>) -> Self {
+        let mut output = 0_usize.to_be_bytes();
+        output[mem::size_of::<usize>() - x_size_bytes(K)..].copy_from_slice(&value.0);
+        usize::from_be_bytes(output)
+    }
+}
+
+impl<const K: u8> CopyBitsSource for X<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    fn data(&self) -> CopyBitsSourceData<'_> {
+        CopyBitsSourceData {
+            bytes: &self.0,
+            bit_offset: Self::DATA_OFFSET,
+            bit_size: Self::BITS,
+        }
+    }
+}
+
+impl<const K: u8> CopyBitsDestination for X<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    const DATA_OFFSET: usize = Self::DATA_OFFSET;
+
+    fn data_mut(&mut self) -> &mut [u8] {
+        &mut self.0
+    }
+
+    fn bits(&self) -> usize {
+        Self::BITS
+    }
+}
+
+impl<const K: u8> X<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    /// Size in bytes
+    const BYTES: usize = x_size_bytes(K);
+    /// Size in bits
+    const BITS: usize = usize::from(K);
+    /// Where do bits of useful data start
+    const DATA_OFFSET: usize = Self::BYTES * u8::BITS as usize - Self::BITS;
+}
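+
+// Layout sketch: assuming `x_size_bytes(K)` is `K` bits rounded up to whole bytes, for `K = 17`
+// this gives BYTES = 3, BITS = 17 and DATA_OFFSET = 3 * 8 - 17 = 7, i.e. the value occupies the
+// last 17 bits of the 3-byte array and the leading 7 bits stay zero.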
+
+/// Wrapper data structure around bits of `y` values; stores data in the last bits of the internal
+/// array
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+#[repr(transparent)]
+pub(in super::super) struct Y<const K: u8>([u8; y_size_bytes(K)])
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized;
+
+impl<const K: u8> Default for Y<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    fn default() -> Self {
+        Self([0; y_size_bytes(K)])
+    }
+}
+
+impl<const K: u8> fmt::Debug for Y<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("Y").field(&self.deref()).finish()
+    }
+}
+
+// TODO: Implement bit matching and remove this
+impl<const K: u8> Deref for Y<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    type Target = BitSlice<u8, Msb0>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0.view_bits::<Msb0>()[Self::DATA_OFFSET..]
+    }
+}
+
+impl<const K: u8> CopyBitsSource for Y<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    fn data(&self) -> CopyBitsSourceData<'_> {
+        CopyBitsSourceData {
+            bytes: &self.0,
+            bit_offset: Self::DATA_OFFSET,
+            bit_size: Self::BITS,
+        }
+    }
+}
+
+impl<const K: u8> CopyBitsDestination for Y<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    const DATA_OFFSET: usize = Self::DATA_OFFSET;
+
+    fn data_mut(&mut self) -> &mut [u8] {
+        &mut self.0
+    }
+
+    fn bits(&self) -> usize {
+        Self::BITS
+    }
+}
+
+#[cfg(test)]
+impl<const K: u8> From<usize> for Y<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    /// Will silently drop data if [`K`] is too small to hold the useful bits of the [`usize`]
+    /// value. Higher level code must ensure this never happens; this method is not exposed outside
+    /// of this crate and crate boundaries are supposed to protect this invariant. Exposing error
+    /// handling from here would be too noisy and didn't seem worth it.
+    fn from(value: usize) -> Self {
+        let mut output = Self::default();
+        // Copy last bytes from big-endian `value` into `Y`
+        output
+            .0
+            .copy_from_slice(&value.to_be_bytes()[mem::size_of::<usize>() - Self::BYTES..]);
+        output
+    }
+}
+
+impl<const K: u8> From<&Y<K>> for usize
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    /// Panics if the conversion to [`usize`] fails. Higher level code must ensure this never
+    /// happens; this method is not exposed outside of this crate and crate boundaries are supposed
+    /// to protect this invariant. Exposing error handling from here would be too noisy and didn't
+    /// seem worth it.
+    fn from(value: &Y<K>) -> Self {
+        let mut output = 0_usize.to_be_bytes();
+        output[mem::size_of::<usize>() - y_size_bytes(K)..].copy_from_slice(&value.0);
+        usize::from_be_bytes(output)
+    }
+}
+
+impl<const K: u8> Y<K>
+where
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+{
+    /// Size in bytes
+    const BYTES: usize = y_size_bytes(K);
+    /// Size in bits
+    const BITS: usize = y_size_bits(K);
+    /// Where do bits of useful data start
+    const DATA_OFFSET: usize = Self::BYTES * u8::BITS as usize - Self::BITS;
+}
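+
+// Layout sketch: if `y_size_bits(K)` is `K + 6` bits (the `+ 6` here is an assumption borrowed
+// from Chia's `PARAM_EXT`; the actual value comes from the table module), then for `K = 17` this
+// gives BITS = 23, BYTES = 3 and DATA_OFFSET = 1.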
+
+/// Wrapper data structure around bits of `metadata` values; stores data in the last bits of the
+/// internal array
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+#[repr(transparent)]
+pub(in super::super) struct Metadata<const K: u8, const TABLE_NUMBER: u8>(
+    [u8; metadata_size_bytes(K, TABLE_NUMBER)],
+)
+where
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized;
+
+impl<const K: u8, const TABLE_NUMBER: u8> Default for Metadata<K, TABLE_NUMBER>
+where
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+{
+    fn default() -> Self {
+        Self([0; metadata_size_bytes(K, TABLE_NUMBER)])
+    }
+}
+
+impl<const K: u8, const TABLE_NUMBER: u8> fmt::Debug for Metadata<K, TABLE_NUMBER>
+where
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("Metadata")
+            .field(&&self.0.view_bits::<Msb0>()[Self::DATA_OFFSET..])
+            .finish()
+    }
+}
+
+impl<const K: u8, const TABLE_NUMBER: u8> CopyBitsSource for Metadata<K, TABLE_NUMBER>
+where
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+{
+    fn data(&self) -> CopyBitsSourceData<'_> {
+        CopyBitsSourceData {
+            bytes: &self.0,
+            bit_offset: Self::DATA_OFFSET,
+            bit_size: Self::BITS,
+        }
+    }
+}
+
+impl<const K: u8, const TABLE_NUMBER: u8> CopyBitsDestination for Metadata<K, TABLE_NUMBER>
+where
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+{
+    const DATA_OFFSET: usize = Self::DATA_OFFSET;
+
+    fn data_mut(&mut self) -> &mut [u8] {
+        &mut self.0
+    }
+
+    fn bits(&self) -> usize {
+        Self::BITS
+    }
+}
+
+#[cfg(test)]
+impl<const K: u8, const TABLE_NUMBER: u8> From<usize> for Metadata<K, TABLE_NUMBER>
+where
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+{
+    /// Will silently drop data if [`K`] is too small to hold the useful bits of the [`usize`]
+    /// value. Higher level code must ensure this never happens; this method is not exposed outside
+    /// of this crate and crate boundaries are supposed to protect this invariant. Exposing error
+    /// handling from here would be too noisy and didn't seem worth it.
+    fn from(value: usize) -> Self {
+        let mut output = Self::default();
+        // Copy last bytes from big-endian `value` into `Metadata`
+        output
+            .0
+            .copy_from_slice(&value.to_be_bytes()[mem::size_of::<usize>() - Self::BYTES..]);
+        output
+    }
+}
+
+impl<const K: u8, const TABLE_NUMBER: u8> Metadata<K, TABLE_NUMBER>
+where
+    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+{
+    /// Size in bytes
+    const BYTES: usize = metadata_size_bytes(K, TABLE_NUMBER);
+    /// Size in bits
+    const BITS: usize = metadata_size_bits(K, TABLE_NUMBER);
+    /// Where do bits of useful data start
+    const DATA_OFFSET: usize = Self::BYTES * u8::BITS as usize - Self::BITS;
+}
+
+/// Wrapper data structure around bits of `position` values; stores data in the last bits of the
+/// internal array. Has the same size as [`X`], but a different internal layout.
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+#[repr(transparent)]
+pub(in super::super) struct Position<const K: u8>([u8; x_size_bytes(K)])
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized;
+
+impl<const K: u8> fmt::Debug for Position<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("Position")
+            .field(&usize::from(*self))
+            .finish()
+    }
+}
+
+impl<const K: u8> From<usize> for Position<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    /// Will silently drop data if [`K`] is too small to hold the useful bits of the [`usize`]
+    /// value. Higher level code must ensure this never happens; this method is not exposed outside
+    /// of this crate and crate boundaries are supposed to protect this invariant. Exposing error
+    /// handling from here would be too noisy and didn't seem worth it.
+    fn from(value: usize) -> Self {
+        let mut output = [0; x_size_bytes(K)];
+        // Copy last bytes
+        output.copy_from_slice(&value.to_be_bytes()[mem::size_of::<usize>() - Self::BYTES..]);
+        Self(output)
+    }
+}
+
+impl<const K: u8> From<Position<K>> for usize
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    /// Panics if the conversion to [`usize`] fails. Higher level code must ensure this never
+    /// happens; this method is not exposed outside of this crate and crate boundaries are supposed
+    /// to protect this invariant. Exposing error handling from here would be too noisy and didn't
+    /// seem worth it.
+    fn from(value: Position<K>) -> Self {
+        let mut output = 0_usize.to_be_bytes();
+        output[mem::size_of::<usize>() - Position::<K>::BYTES..].copy_from_slice(&value.0);
+        usize::from_be_bytes(output)
+    }
+}
+
+impl<const K: u8> Position<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    /// Size in bytes
+    const BYTES: usize = x_size_bytes(K);
+}
diff --git a/crates/subspace-proof-of-space/src/chiapos/tables.rs b/crates/subspace-proof-of-space/src/chiapos/tables.rs
new file mode 100644
index 00000000000..fc8d70b44b9
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos/tables.rs
@@ -0,0 +1,356 @@
+#[cfg(test)]
+mod tests;
+
+use crate::chiapos::table::types::{CopyBitsDestination, Metadata, Position, X, Y};
+pub use crate::chiapos::table::TablesCache;
+use crate::chiapos::table::{
+    compute_f1, compute_fn, fn_hashing_input_bytes, max_metadata_size_bits, metadata_size_bytes,
+    num_matches, partial_y, x_size_bytes, y_size_bits, y_size_bytes, Table,
+};
+use crate::chiapos::utils::EvaluatableUsize;
+use crate::chiapos::{Challenge, Quality, Seed};
+use bitvec::prelude::*;
+use core::mem;
+use sha2::{Digest, Sha256};
+
+/// Pick a position in table `table_number` based on the challenge bits
+const fn pick_position<const K: u8>(
+    [left_position, right_position]: [Position<K>; 2],
+    last_5_challenge_bits: u8,
+    table_number: u8,
+) -> Position<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+{
+    if ((last_5_challenge_bits >> (table_number - 2)) & 1) == 0 {
+        left_position
+    } else {
+        right_position
+    }
+}
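+
+// For example, with `last_5_challenge_bits = 0b01010`: table 2 looks at bit 0 (`0`, so the left
+// position is taken), table 3 looks at bit 1 (`1`, right position), and so on up to table 6,
+// which looks at bit 4.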
+
+pub const fn quality_hashing_buffer_bytes(k: u8) -> usize {
+    mem::size_of::<Challenge>() + (usize::from(k) * 2).div_ceil(u8::BITS as usize)
+}
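+
+// E.g. for `k = 17` and a 32-byte `Challenge` (the size used throughout the tests) this is
+// `32 + (2 * 17).div_ceil(8) = 32 + 5 = 37` bytes.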
+
+/// Collection of Chia tables
+#[derive(Debug)]
+pub(super) struct TablesGeneric<const K: u8>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 1) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 2) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 3) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 4) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 5) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 6) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 7) }>: Sized,
+{
+    table_1: Table<K, 1>,
+    table_2: Table<K, 2>,
+    table_3: Table<K, 3>,
+    table_4: Table<K, 4>,
+    table_5: Table<K, 5>,
+    table_6: Table<K, 6>,
+    table_7: Table<K, 7>,
+}
+
+impl<const K: u8> TablesGeneric<K>
+where
+    EvaluatableUsize<{ x_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ y_size_bytes(K) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 1) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 2) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 3) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 4) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 5) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 6) }>: Sized,
+    EvaluatableUsize<{ metadata_size_bytes(K, 7) }>: Sized,
+    EvaluatableUsize<{ fn_hashing_input_bytes(K) }>: Sized,
+    EvaluatableUsize<{ quality_hashing_buffer_bytes(K) }>: Sized,
+    EvaluatableUsize<{ 64 * K as usize / 8 }>: Sized,
+{
+    /// Create Chia proof of space tables.
+    ///
+    /// Advanced version of [`Self::create_simple`] that allows reusing allocations via a cache.
+    ///
+    /// ## Panics
+    /// Panics when [`K`] is too large due to values not fitting into [`ValueNumberT`]. Also
+    /// panics if `K` is too large for buckets to be kept in memory on the current platform.
+    pub(super) fn create(seed: Seed, cache: &mut TablesCache<K>) -> Self {
+        let heap_size_bits = usize::MAX as u128 * u128::from(u8::BITS);
+        let num_values = 1 << K;
+        // Check that space for `y` values can be allocated on the heap
+        assert!(num_values * y_size_bits(K) as u128 <= heap_size_bits);
+        // Check that positions can be allocated on the heap
+        assert!(num_values * u128::from(K) * 2 <= heap_size_bits);
+        // Check that metadata can be allocated on the heap
+        assert!(num_values * max_metadata_size_bits(K) as u128 * 2 <= heap_size_bits);
+        // `y` must fit into `usize`
+        assert!(y_size_bits(K) <= usize::BITS as usize);
+
+        let table_1 = Table::<K, 1>::create(seed);
+        let table_2 = Table::<K, 2>::create(&table_1, cache);
+        let table_3 = Table::<K, 3>::create(&table_2, cache);
+        let table_4 = Table::<K, 4>::create(&table_3, cache);
+        let table_5 = Table::<K, 5>::create(&table_4, cache);
+        let table_6 = Table::<K, 6>::create(&table_5, cache);
+        let table_7 = Table::<K, 7>::create(&table_6, cache);
+
+        Self {
+            table_1,
+            table_2,
+            table_3,
+            table_4,
+            table_5,
+            table_6,
+            table_7,
+        }
+    }
+
+    /// Find proof of space quality for the given challenge.
+    pub(super) fn find_quality<'a>(
+        &'a self,
+        challenge: &'a Challenge,
+    ) -> impl Iterator<Item = Quality> + 'a
+    where
+        EvaluatableUsize<{ mem::size_of::<Challenge>() + K as usize * 2 }>: Sized,
+    {
+        let last_5_challenge_bits = challenge[challenge.len() - 1] & 0b00011111;
+
+        let ys = self.table_7.ys();
+        // We take advantage of the fact that entries are sorted by `y` (as big-endian numbers) to
+        // quickly seek to the desired offset
+        let mut first_k_challenge_bits = Y::<K>::default();
+        first_k_challenge_bits.copy_bits_from(challenge, 0_usize, usize::from(K), 0_usize);
+        let first_matching_element = ys
+            .binary_search(&first_k_challenge_bits)
+            .unwrap_or_else(|insert| insert);
+
+        // Iterate only over elements that match the `first_k_challenge_bits` prefix
+        ys[first_matching_element..]
+            .iter()
+            .take_while(move |&y| {
+                let mut y_k_bits = Y::<K>::default();
+                y_k_bits.copy_bits_from(y, 0_usize, usize::from(K), 0_usize);
+                // Check if first K bits match
+                y_k_bits == first_k_challenge_bits
+            })
+            .zip(first_matching_element..)
+            .map(move |(_y, position)| {
+                let positions = self
+                    .table_7
+                    .position(Position::<K>::from(position))
+                    .expect("Internally generated pointers must be correct; qed");
+                let positions = self
+                    .table_6
+                    .position(pick_position(positions, last_5_challenge_bits, 6))
+                    .expect("Internally generated pointers must be correct; qed");
+                let positions = self
+                    .table_5
+                    .position(pick_position(positions, last_5_challenge_bits, 5))
+                    .expect("Internally generated pointers must be correct; qed");
+                let positions = self
+                    .table_4
+                    .position(pick_position(positions, last_5_challenge_bits, 4))
+                    .expect("Internally generated pointers must be correct; qed");
+                let positions = self
+                    .table_3
+                    .position(pick_position(positions, last_5_challenge_bits, 3))
+                    .expect("Internally generated pointers must be correct; qed");
+                let [left_position, right_position] = self
+                    .table_2
+                    .position(pick_position(positions, last_5_challenge_bits, 2))
+                    .expect("Internally generated pointers must be correct; qed");
+
+                let left_x = self
+                    .table_1
+                    .xs()
+                    .get(usize::from(left_position))
+                    .expect("Internally generated pointers must be correct; qed");
+                let right_x = self
+                    .table_1
+                    .xs()
+                    .get(usize::from(right_position))
+                    .expect("Internally generated pointers must be correct; qed");
+
+                let mut buffer = [0; mem::size_of::<Challenge>() + K as usize * 2];
+
+                buffer[..mem::size_of::<Challenge>()].copy_from_slice(challenge);
+                buffer.copy_bits_from(
+                    left_x,
+                    0_usize,
+                    usize::from(K),
+                    mem::size_of::<Challenge>() * u8::BITS as usize,
+                );
+                buffer.copy_bits_from(
+                    right_x,
+                    0_usize,
+                    usize::from(K),
+                    mem::size_of::<Challenge>() * u8::BITS as usize + usize::from(K),
+                );
+
+                let mut hasher = Sha256::new();
+                hasher.update(buffer);
+                hasher.finalize().into()
+            })
+    }
+
+    /// Find proof of space for the given challenge.
+    pub(super) fn find_proof<'a>(
+        &'a self,
+        challenge: &'a Challenge,
+    ) -> impl Iterator<Item = [u8; 64 * K as usize / 8]> + 'a {
+        let ys = self.table_7.ys();
+        // We take advantage of the fact that entries are sorted by `y` (as big-endian numbers) to
+        // quickly seek to the desired offset
+        let mut first_k_challenge_bits = Y::<K>::default();
+        first_k_challenge_bits.copy_bits_from(challenge, 0_usize, usize::from(K), 0_usize);
+        let first_matching_element = ys
+            .binary_search(&first_k_challenge_bits)
+            .unwrap_or_else(|insert| insert);
+
+        // Iterate only over elements that match the `first_k_challenge_bits` prefix
+        ys[first_matching_element..]
+            .iter()
+            .take_while(move |&y| {
+                let mut y_k_bits = Y::<K>::default();
+                y_k_bits.copy_bits_from(y, 0_usize, usize::from(K), 0_usize);
+                // Check if first K bits match
+                y_k_bits == first_k_challenge_bits
+            })
+            .zip(first_matching_element..)
+            .map(move |(_y, position)| {
+                let mut proof = [0u8; 64 * K as usize / 8];
+
+                self.table_7
+                    .position(Position::<K>::from(position))
+                    .expect("Internally generated pointers must be correct; qed")
+                    .into_iter()
+                    .flat_map(|position| {
+                        self.table_6
+                            .position(position)
+                            .expect("Internally generated pointers must be correct; qed")
+                    })
+                    .flat_map(|position| {
+                        self.table_5
+                            .position(position)
+                            .expect("Internally generated pointers must be correct; qed")
+                    })
+                    .flat_map(|position| {
+                        self.table_4
+                            .position(position)
+                            .expect("Internally generated pointers must be correct; qed")
+                    })
+                    .flat_map(|position| {
+                        self.table_3
+                            .position(position)
+                            .expect("Internally generated pointers must be correct; qed")
+                    })
+                    .flat_map(|position| {
+                        self.table_2
+                            .position(position)
+                            .expect("Internally generated pointers must be correct; qed")
+                    })
+                    .map(|position| {
+                        self.table_1
+                            .xs()
+                            .get(usize::from(position))
+                            .expect("Internally generated pointers must be correct; qed")
+                    })
+                    .enumerate()
+                    .for_each(|(offset, x)| {
+                        proof.copy_bits_from(x, 0_usize, usize::from(K), usize::from(K) * offset)
+                    });
+
+                proof
+            })
+    }
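+
+    // Proof layout note: following back-pointers from a single table-7 entry through tables 6..2
+    // yields 2^6 = 64 table-1 `x` values, and the loop above bit-packs them as consecutive K-bit
+    // values, which is why a proof is exactly `64 * K / 8` bytes.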
+
+    /// Verify proof of space for the given seed and challenge.
+    ///
+    /// Returns the quality on successful verification.
+    pub(super) fn verify(
+        seed: Seed,
+        challenge: &Challenge,
+        proof_of_space: &[u8; 64 * K as usize / 8],
+    ) -> Option<Quality>
+    where
+        EvaluatableUsize<{ (K as usize * 2).div_ceil(u8::BITS as usize) }>: Sized,
+        EvaluatableUsize<{ mem::size_of::<Challenge>() + K as usize * 2 }>: Sized,
+    {
+        let last_5_challenge_bits = challenge[challenge.len() - 1] & 0b00011111;
+
+        let ys_and_metadata = (0..64_usize)
+            .map(|offset| {
+                let mut x = X::default();
+                x.copy_bits_from(
+                    proof_of_space,
+                    usize::from(K) * offset,
+                    usize::from(K),
+                    0_usize,
+                );
+
+                let (partial_y, partial_y_offset) = partial_y::<K>(seed, usize::from(&x));
+                let y = compute_f1::<K>(x, &partial_y, partial_y_offset);
+
+                let mut metadata = Metadata::<K, 1>::default();
+                metadata.copy_bits_from(&x, 0_usize, K, 0_usize);
+                (y, metadata)
+            })
+            .collect::<Vec<_>>();
+
+        Self::collect_ys_and_metadata::<2, 1>(&ys_and_metadata)
+            .and_then(|ys_and_metadata| Self::collect_ys_and_metadata::<3, 2>(&ys_and_metadata))
+            .and_then(|ys_and_metadata| Self::collect_ys_and_metadata::<4, 3>(&ys_and_metadata))
+            .and_then(|ys_and_metadata| Self::collect_ys_and_metadata::<5, 4>(&ys_and_metadata))
+            .and_then(|ys_and_metadata| Self::collect_ys_and_metadata::<6, 5>(&ys_and_metadata))
+            .and_then(|ys_and_metadata| Self::collect_ys_and_metadata::<7, 6>(&ys_and_metadata))
+            .filter(|ys_and_metadata| {
+                let (y, _metadata) = ys_and_metadata
+                    .first()
+                    .expect("On success returns exactly one entry; qed");
+
+                y.starts_with(&challenge.view_bits::<Msb0>()[..usize::from(K)])
+            })
+            .map(|_| {
+                let mut buffer = [0; mem::size_of::<Challenge>() + K as usize * 2];
+
+                buffer[..mem::size_of::<Challenge>()].copy_from_slice(challenge);
+                // Build a small index (0..32) from the last 5 challenge bits; the byte must land
+                // in the least significant position, hence the little-endian interpretation
+                let mut quality_index = 0_usize.to_le_bytes();
+                quality_index[0] = last_5_challenge_bits;
+                let quality_index = usize::from_le_bytes(quality_index);
+
+                buffer.copy_bits_from(
+                    proof_of_space,
+                    quality_index * usize::from(K) * 2,
+                    usize::from(K) * 2,
+                    mem::size_of::<Challenge>() * u8::BITS as usize,
+                );
+
+                let mut hasher = Sha256::new();
+                hasher.update(buffer);
+                hasher.finalize().into()
+            })
+    }
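+
+    // Quality selection sketch: with e.g. `last_5_challenge_bits = 3`, bits `[6 * K, 8 * K)` of
+    // the proof (the fourth pair of `x` values) are appended to the challenge and hashed with
+    // SHA-256, reproducing the quality that `find_quality` computes from the tables.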
+
+    fn collect_ys_and_metadata<const TABLE_NUMBER: u8, const PARENT_TABLE_NUMBER: u8>(
+        ys_and_metadata: &[(Y<K>, Metadata<K, PARENT_TABLE_NUMBER>)],
+    ) -> Option<Vec<(Y<K>, Metadata<K, TABLE_NUMBER>)>>
+    where
+        EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
+        EvaluatableUsize<{ metadata_size_bytes(K, PARENT_TABLE_NUMBER) }>: Sized,
+    {
+        ys_and_metadata
+            .array_chunks::<2>()
+            .map(|&[(left_y, left_metadata), (right_y, right_metadata)]| {
+                (num_matches(&left_y, &right_y) == 1).then_some(compute_fn(
+                    left_y,
+                    left_metadata,
+                    right_metadata,
+                ))
+            })
+            .collect()
+    }
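+
+    // Note: each invocation of `collect_ys_and_metadata` halves the number of entries
+    // (64 -> 32 -> ... -> 1 across tables 2..=7). `num_matches` must return exactly 1 for every
+    // adjacent pair; a single failing pair yields `None`, which `collect()` propagates so the
+    // whole proof is rejected.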
+}
diff --git a/crates/subspace-proof-of-space/src/chiapos/tables/tests.rs b/crates/subspace-proof-of-space/src/chiapos/tables/tests.rs
new file mode 100644
index 00000000000..0cfbf49b75a
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos/tables/tests.rs
@@ -0,0 +1,34 @@
+use crate::chiapos::Tables;
+use std::mem;
+
+const K: u8 = 17;
+
+#[test]
+fn self_verification() {
+    let seed = [1; 32];
+    let tables = Tables::<K>::create_simple(seed);
+
+    for challenge_index in 0..1000_u32 {
+        let mut challenge = [0; 32];
+        challenge[..mem::size_of::<u32>()].copy_from_slice(&challenge_index.to_le_bytes());
+        let qualities = tables.find_quality(&challenge).collect::<Vec<_>>();
+        let proofs = tables.find_proof(&challenge).collect::<Vec<_>>();
+
+        assert_eq!(qualities.len(), proofs.len());
+
+        for (quality, proof) in qualities.into_iter().zip(&proofs) {
+            assert_eq!(
+                Some(quality),
+                Tables::<K>::verify(seed, &challenge, proof),
+                "challenge index {challenge_index}"
+            );
+            let mut bad_challenge = [0; 32];
+            bad_challenge[..mem::size_of::<u32>()]
+                .copy_from_slice(&(challenge_index + 1).to_le_bytes());
+            assert!(
+                Tables::<K>::verify(seed, &bad_challenge, proof).is_none(),
+                "challenge index {challenge_index}"
+            );
+        }
+    }
+}
diff --git a/crates/subspace-proof-of-space/src/chiapos/tests.rs b/crates/subspace-proof-of-space/src/chiapos/tests.rs
new file mode 100644
index 00000000000..9fb2dd8fd92
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos/tests.rs
@@ -0,0 +1,40 @@
+use crate::chiapos::{Seed, Tables};
+use std::mem;
+
+const K: u8 = 17;
+
+/// Chia does this for some reason 🤷
+fn to_chia_seed(seed: &Seed) -> Seed {
+    let mut chia_seed = [1u8; 32];
+    chia_seed[1..].copy_from_slice(&seed[..31]);
+    chia_seed
+}
+
+#[test]
+fn test_against_chiapos() {
+    let seed = [1; 32];
+    let original_table = subspace_chiapos::Table::generate(&seed);
+    let chia_seed = to_chia_seed(&seed);
+    let tables = Tables::<K>::create_simple(chia_seed);
+
+    for challenge_index in (0..1u32 << 16).map(|_| rand::random::<u32>()) {
+        let mut challenge = [0; 32];
+        challenge[..mem::size_of::<u32>()].copy_from_slice(&challenge_index.to_le_bytes());
+
+        let maybe_original_proof = original_table
+            .find_quality(challenge_index)
+            .map(|quality| quality.create_proof());
+
+        let found_proofs = tables.find_proof(&challenge).collect::<Vec<_>>();
+
+        if let Some(original_proof) = maybe_original_proof {
+            assert!(Tables::<K>::verify(chia_seed, &challenge, &original_proof).is_some());
+
+            assert!(!found_proofs.is_empty());
+        }
+
+        for proof in &found_proofs {
+            assert!(subspace_chiapos::is_proof_valid(&seed, challenge_index, proof).is_some());
+        }
+    }
+}
diff --git a/crates/subspace-proof-of-space/src/chiapos/utils.rs b/crates/subspace-proof-of-space/src/chiapos/utils.rs
new file mode 100644
index 00000000000..5e22958ebad
--- /dev/null
+++ b/crates/subspace-proof-of-space/src/chiapos/utils.rs
@@ -0,0 +1,4 @@
+/// TODO: Workaround for "unconstrained generic constant" suggested in
+///  https://github.com/rust-lang/rust/issues/82509#issuecomment-1165533546
+#[derive(Debug)]
+pub struct EvaluatableUsize<const N: usize>;
diff --git a/crates/subspace-proof-of-space/src/lib.rs b/crates/subspace-proof-of-space/src/lib.rs
index c082e6d0c6d..6553627c74e 100644
--- a/crates/subspace-proof-of-space/src/lib.rs
+++ b/crates/subspace-proof-of-space/src/lib.rs
@@ -1,10 +1,24 @@
 //! Subspace proof of space implementation based on Chia
 #![cfg_attr(not(feature = "std"), no_std)]
+// `generic_const_exprs` is an incomplete feature
+#![allow(incomplete_features)]
 #![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
-#![feature(const_trait_impl)]
+#![feature(
+    array_chunks,
+    array_windows,
+    const_num_from_num,
+    const_trait_impl,
+    generic_const_exprs,
+    int_roundings,
+    iter_collect_into
+)]
 
+#[cfg(feature = "chia")]
+pub mod chia;
 #[cfg(feature = "chia-legacy")]
 pub mod chia_legacy;
+#[cfg(feature = "chia")]
+pub mod chiapos;
 #[cfg(feature = "shim")]
 pub mod shim;
 
@@ -25,6 +39,9 @@ pub enum PosTableType {
     /// Chia table
     #[cfg(feature = "chia-legacy")]
     ChiaLegacy,
+    /// Chia table
+    #[cfg(feature = "chia")]
+    Chia,
     /// Shim table
     #[cfg(feature = "shim")]
     Shim,