From 0ca7c65efde2a0ea800360d5b94af260af86a667 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?= <git@kchr.de>
Date: Fri, 24 Feb 2023 12:43:01 +0100
Subject: [PATCH] wasm-executor: Support growing the memory (#12520)

* As always, start with something :P

* Add support for max_heap_pages

* Add support for wasmtime

* Make it compile

* Fix compilation

* Copy wrongly merged code

* Fix compilation

* Some fixes

* Fix

* Get stuff working

* More work

* More fixes

* ...

* More

* Fixes

* Switch wasmi to use `RuntimeBlob` like wasmtime

* Removed unused stuff

* Cleanup

* More cleanups

* Introduce `CallContext`

* Fixes

* More fixes

* Add builder for creating the `WasmExecutor`

* Adds some docs

* FMT

* First round of feedback.

* Review feedback round 2

* More fixes

* Fix try-runtime

* Update client/executor/wasmtime/src/instance_wrapper.rs

Co-authored-by: Koute <koute@users.noreply.github.com>

* Update client/executor/common/src/wasm_runtime.rs

Co-authored-by: Koute <koute@users.noreply.github.com>

* Update client/executor/common/src/runtime_blob/runtime_blob.rs

Co-authored-by: Koute <koute@users.noreply.github.com>

* Update client/executor/common/src/wasm_runtime.rs

Co-authored-by: Koute <koute@users.noreply.github.com>

* Update client/allocator/src/freeing_bump.rs

Co-authored-by: Koute <koute@users.noreply.github.com>

* Update client/allocator/src/freeing_bump.rs

Co-authored-by: Koute <koute@users.noreply.github.com>

* Feedback round 3

* FMT

* Review comments

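A rough usage sketch of the builder and heap allocation strategies introduced
above (untested; the host-function type and the exact re-export paths are
assumptions, not part of this patch):

    use sc_executor::{WasmExecutor, WasmExecutionMethod};
    use sc_executor_common::wasm_runtime::HeapAllocStrategy;

    // Any `HostFunctions` implementation works here; `SubstrateHostFunctions`
    // is just the set used by the executor benchmarks.
    type HostFns = sp_io::SubstrateHostFunctions;

    fn build_executor() -> WasmExecutor<HostFns> {
        WasmExecutor::builder(WasmExecutionMethod::Interpreted)
            // Onchain calls keep the conservative static strategy: the initial
            // pages requested by the wasm blob plus 2048 extra pages.
            .with_onchain_heap_alloc_strategy(HeapAllocStrategy::Static { extra_pages: 2048 })
            // Offchain calls may grow the linear memory dynamically,
            // up to the 4GiB maximum defined by the wasm spec.
            .with_offchain_heap_alloc_strategy(HeapAllocStrategy::Dynamic { maximum_pages: None })
            .with_max_runtime_instances(2)
            .with_runtime_cache_size(4)
            .build()
    }
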
---------

Co-authored-by: Koute <koute@users.noreply.github.com>
---
 Cargo.lock                                    |  14 +
 bin/node/executor/benches/bench.rs            |  30 +-
 bin/node/executor/tests/common.rs             |   4 +-
 client/allocator/src/error.rs                 |   2 +-
 client/allocator/src/freeing_bump.rs          | 403 +++++++++++-------
 client/allocator/src/lib.rs                   |  32 ++
 client/api/src/call_executor.rs               |   2 +
 client/executor/Cargo.toml                    |   3 +-
 client/executor/benches/bench.rs              |   9 +-
 client/executor/common/Cargo.toml             |   2 +-
 .../common/src/runtime_blob/runtime_blob.rs   |  26 +-
 client/executor/common/src/util.rs            |   6 +-
 client/executor/common/src/wasm_runtime.rs    |  26 ++
 client/executor/runtime-test/src/lib.rs       |   9 +
 .../executor/src/integration_tests/linux.rs   |  49 ++-
 client/executor/src/integration_tests/mod.rs  |  39 +-
 client/executor/src/native_executor.rs        | 250 +++++++++--
 client/executor/src/wasm_runtime.rs           |  30 +-
 client/executor/wasmi/Cargo.toml              |   2 +-
 client/executor/wasmi/src/lib.rs              | 205 +++++----
 client/executor/wasmtime/Cargo.toml           |   2 +-
 client/executor/wasmtime/src/host.rs          |  51 ++-
 .../executor/wasmtime/src/instance_wrapper.rs | 106 +++--
 client/executor/wasmtime/src/runtime.rs       |  84 +---
 client/executor/wasmtime/src/tests.rs         | 260 +++++------
 client/finality-grandpa/src/lib.rs            |   3 +-
 .../rpc-spec-v2/src/chain_head/chain_head.rs  |   3 +-
 client/rpc/src/state/state_full.rs            |   2 +
 client/service/src/client/call_executor.rs    |  16 +-
 client/service/test/src/client/mod.rs         |   8 +-
 primitives/core/src/traits.rs                 |  19 +-
 primitives/state-machine/src/lib.rs           |  14 +-
 primitives/wasm-interface/Cargo.toml          |   2 +-
 test-utils/runtime/src/lib.rs                 |   9 +-
 test-utils/runtime/src/system.rs              |  20 +-
 .../benchmarking-cli/src/pallet/command.rs    |  13 +-
 utils/frame/try-runtime/cli/src/lib.rs        |   4 +-
 37 files changed, 1092 insertions(+), 667 deletions(-)

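Reviewer note on the new growth policy in `FreeingBumpHeapAllocator::bump`
(client/allocator/src/freeing_bump.rs): when the bump pointer would move past
the current memory size, the allocator first tries to double the number of
wasm pages, capped at `max_pages`, and grows further only if the request needs
even more. A standalone sketch of that page calculation (the helper name
`next_pages` is made up for illustration):

    const PAGE_SIZE: u64 = 65536;

    /// Page count to grow to once `required_size` bytes no longer fit into
    /// `current_pages` pages; `None` mirrors `Error::AllocatorOutOfSpace`.
    fn next_pages(current_pages: u32, required_size: u64, max_pages: u32) -> Option<u32> {
        // Round the required size up to whole wasm pages.
        let required_pages = u32::try_from((required_size + PAGE_SIZE - 1) / PAGE_SIZE).ok()?;
        if current_pages >= max_pages || required_pages > max_pages {
            return None
        }
        // Prefer doubling, but never exceed the maximum and never under-allocate.
        Some(required_pages.max((current_pages * 2).min(max_pages)))
    }

    fn main() {
        // Growing from 1 page for a 2-page allocation (plus its 8 byte header)
        // lands on 3 pages, as checked by `should_grow_memory_when_running_out_of_memory`.
        assert_eq!(next_pages(1, 2 * PAGE_SIZE + 8, 65536), Some(3));
    }
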
diff --git a/Cargo.lock b/Cargo.lock
index 7cfde057041e3..299f639219437 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7653,6 +7653,18 @@ version = "0.6.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
 
+[[package]]
+name = "region"
+version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "76e189c2369884dce920945e2ddf79b3dff49e071a167dd1817fa9c4c00d512e"
+dependencies = [
+ "bitflags",
+ "libc",
+ "mach",
+ "winapi",
+]
+
 [[package]]
 name = "remove_dir_all"
 version = "0.5.3"
@@ -8404,6 +8416,7 @@ dependencies = [
  "sp-runtime",
  "sp-runtime-interface",
  "sp-state-machine",
+ "sp-tracing",
  "sp-trie",
  "sp-version",
  "sp-wasm-interface",
@@ -11956,6 +11969,7 @@ dependencies = [
  "memory_units",
  "num-rational",
  "num-traits",
+ "region",
 ]
 
 [[package]]
diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs
index 4154d1cffcf11..19e7b158a8c81 100644
--- a/bin/node/executor/benches/bench.rs
+++ b/bin/node/executor/benches/bench.rs
@@ -31,7 +31,7 @@ use sc_executor::{
 };
 use sp_core::{
 	storage::well_known_keys,
-	traits::{CodeExecutor, RuntimeCode},
+	traits::{CallContext, CodeExecutor, RuntimeCode},
 };
 use sp_runtime::traits::BlakeTwo256;
 use sp_state_machine::TestExternalities as CoreTestExternalities;
@@ -112,20 +112,41 @@ fn construct_block<E: Externalities>(
 
 	// execute the block to get the real header.
 	executor
-		.call(ext, &runtime_code, "Core_initialize_block", &header.encode(), true)
+		.call(
+			ext,
+			&runtime_code,
+			"Core_initialize_block",
+			&header.encode(),
+			true,
+			CallContext::Offchain,
+		)
 		.0
 		.unwrap();
 
 	for i in extrinsics.iter() {
 		executor
-			.call(ext, &runtime_code, "BlockBuilder_apply_extrinsic", &i.encode(), true)
+			.call(
+				ext,
+				&runtime_code,
+				"BlockBuilder_apply_extrinsic",
+				&i.encode(),
+				true,
+				CallContext::Offchain,
+			)
 			.0
 			.unwrap();
 	}
 
 	let header = Header::decode(
 		&mut &executor
-			.call(ext, &runtime_code, "BlockBuilder_finalize_block", &[0u8; 0], true)
+			.call(
+				ext,
+				&runtime_code,
+				"BlockBuilder_finalize_block",
+				&[0u8; 0],
+				true,
+				CallContext::Offchain,
+			)
 			.0
 			.unwrap()[..],
 	)
@@ -201,6 +222,7 @@ fn bench_execute_block(c: &mut Criterion) {
 								"Core_execute_block",
 								&block.0,
 								use_native,
+								CallContext::Offchain,
 							)
 							.0
 							.unwrap();
diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs
index b428e32ea8b8d..036528f8dae1f 100644
--- a/bin/node/executor/tests/common.rs
+++ b/bin/node/executor/tests/common.rs
@@ -26,7 +26,7 @@ use sp_consensus_babe::{
 use sp_core::{
 	crypto::KeyTypeId,
 	sr25519::Signature,
-	traits::{CodeExecutor, RuntimeCode},
+	traits::{CallContext, CodeExecutor, RuntimeCode},
 };
 use sp_runtime::{
 	traits::{BlakeTwo256, Header as HeaderT},
@@ -114,7 +114,7 @@ pub fn executor_call(
 		heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()),
 	};
 	sp_tracing::try_init_simple();
-	executor().call(&mut t, &runtime_code, method, data, use_native)
+	executor().call(&mut t, &runtime_code, method, data, use_native, CallContext::Onchain)
 }
 
 pub fn new_test_ext(code: &[u8]) -> TestExternalities<BlakeTwo256> {
diff --git a/client/allocator/src/error.rs b/client/allocator/src/error.rs
index a3d91a3e2bacc..08d84317b6503 100644
--- a/client/allocator/src/error.rs
+++ b/client/allocator/src/error.rs
@@ -16,7 +16,7 @@
 // limitations under the License.
 
 /// The error type used by the allocators.
-#[derive(thiserror::Error, Debug)]
+#[derive(thiserror::Error, Debug, PartialEq)]
 pub enum Error {
 	/// Someone tried to allocate more memory than the allowed maximum per allocation.
 	#[error("Requested allocation size is too large")]
diff --git a/client/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs
index fdd63bd66aba8..c3cb827afec08 100644
--- a/client/allocator/src/freeing_bump.rs
+++ b/client/allocator/src/freeing_bump.rs
@@ -67,10 +67,11 @@
 //!   wasted. This is more pronounced (in terms of absolute heap amounts) with larger allocation
 //!   sizes.
 
-use crate::Error;
+use crate::{Error, Memory, MAX_WASM_PAGES, PAGE_SIZE};
 pub use sp_core::MAX_POSSIBLE_ALLOCATION;
 use sp_wasm_interface::{Pointer, WordSize};
 use std::{
+	cmp::{max, min},
 	mem,
 	ops::{Index, IndexMut, Range},
 };
@@ -237,7 +238,7 @@ impl Header {
 	///
 	/// Returns an error if the `header_ptr` is out of bounds of the linear memory or if the read
 	/// header is corrupted (e.g. the order is incorrect).
-	fn read_from<M: Memory + ?Sized>(memory: &M, header_ptr: u32) -> Result<Self, Error> {
+	fn read_from(memory: &impl Memory, header_ptr: u32) -> Result<Self, Error> {
 		let raw_header = memory.read_le_u64(header_ptr)?;
 
 		// Check if the header represents an occupied or free allocation and extract the header data
@@ -255,7 +256,7 @@ impl Header {
 	/// Write out this header to memory.
 	///
 	/// Returns an error if the `header_ptr` is out of bounds of the linear memory.
-	fn write_into<M: Memory + ?Sized>(&self, memory: &mut M, header_ptr: u32) -> Result<(), Error> {
+	fn write_into(&self, memory: &mut impl Memory, header_ptr: u32) -> Result<(), Error> {
 		let (header_data, occupied_mask) = match *self {
 			Self::Occupied(order) => (order.into_raw(), 0x00000001_00000000),
 			Self::Free(link) => (link.into_raw(), 0x00000000_00000000),
@@ -343,6 +344,15 @@ pub struct AllocationStats {
 	pub address_space_used: u32,
 }
 
+/// Convert the given `size` in bytes into the number of pages.
+///
+/// The returned number of pages is guaranteed to be big enough to hold memory with the given `size`.
+///
+/// Returns `None` if the number of pages does not fit into `u32`.
+fn pages_from_size(size: u64) -> Option<u32> {
+	u32::try_from((size + PAGE_SIZE as u64 - 1) / PAGE_SIZE as u64).ok()
+}
+
 /// An implementation of freeing bump allocator.
 ///
 /// Refer to the module-level documentation for further details.
@@ -351,7 +361,7 @@ pub struct FreeingBumpHeapAllocator {
 	bumper: u32,
 	free_lists: FreeLists,
 	poisoned: bool,
-	last_observed_memory_size: u32,
+	last_observed_memory_size: u64,
 	stats: AllocationStats,
 }
 
@@ -395,9 +405,9 @@ impl FreeingBumpHeapAllocator {
 	///
 	/// - `mem` - a slice representing the linear memory on which this allocator operates.
 	/// - `size` - size in bytes of the allocation request
-	pub fn allocate<M: Memory + ?Sized>(
+	pub fn allocate(
 		&mut self,
-		mem: &mut M,
+		mem: &mut impl Memory,
 		size: WordSize,
 	) -> Result<Pointer<u8>, Error> {
 		if self.poisoned {
@@ -412,7 +422,7 @@ impl FreeingBumpHeapAllocator {
 		let header_ptr: u32 = match self.free_lists[order] {
 			Link::Ptr(header_ptr) => {
 				assert!(
-					header_ptr + order.size() + HEADER_SIZE <= mem.size(),
+					u64::from(header_ptr + order.size() + HEADER_SIZE) <= mem.size(),
 					"Pointer is looked up in list of free entries, into which
 					only valid values are inserted; qed"
 				);
@@ -427,7 +437,7 @@ impl FreeingBumpHeapAllocator {
 			},
 			Link::Nil => {
 				// Corresponding free list is empty. Allocate a new item.
-				Self::bump(&mut self.bumper, order.size() + HEADER_SIZE, mem.size())?
+				Self::bump(&mut self.bumper, order.size() + HEADER_SIZE, mem)?
 			},
 		};
 
@@ -437,7 +447,7 @@ impl FreeingBumpHeapAllocator {
 		self.stats.bytes_allocated += order.size() + HEADER_SIZE;
 		self.stats.bytes_allocated_sum += u128::from(order.size() + HEADER_SIZE);
 		self.stats.bytes_allocated_peak =
-			std::cmp::max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated);
+			max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated);
 		self.stats.address_space_used = self.bumper - self.original_heap_base;
 
 		log::trace!(target: LOG_TARGET, "after allocation: {:?}", self.stats);
@@ -457,11 +467,7 @@ impl FreeingBumpHeapAllocator {
 	///
 	/// - `mem` - a slice representing the linear memory on which this allocator operates.
 	/// - `ptr` - pointer to the allocated chunk
-	pub fn deallocate<M: Memory + ?Sized>(
-		&mut self,
-		mem: &mut M,
-		ptr: Pointer<u8>,
-	) -> Result<(), Error> {
+	pub fn deallocate(&mut self, mem: &mut impl Memory, ptr: Pointer<u8>) -> Result<(), Error> {
 		if self.poisoned {
 			return Err(error("the allocator has been poisoned"))
 		}
@@ -503,15 +509,52 @@ impl FreeingBumpHeapAllocator {
 	///
 	/// Returns the `bumper` from before the increase. Returns an `Error::AllocatorOutOfSpace` if
 	/// the operation would exhaust the heap.
-	fn bump(bumper: &mut u32, size: u32, heap_end: u32) -> Result<u32, Error> {
-		if *bumper + size > heap_end {
-			log::error!(
-				target: LOG_TARGET,
-				"running out of space with current bumper {}, mem size {}",
-				bumper,
-				heap_end
+	fn bump(bumper: &mut u32, size: u32, memory: &mut impl Memory) -> Result<u32, Error> {
+		let required_size = u64::from(*bumper) + u64::from(size);
+
+		if required_size > memory.size() {
+			let required_pages =
+				pages_from_size(required_size).ok_or_else(|| Error::AllocatorOutOfSpace)?;
+
+			let current_pages = memory.pages();
+			let max_pages = memory.max_pages().unwrap_or(MAX_WASM_PAGES);
+			debug_assert!(
+				current_pages < required_pages,
+				"current pages {current_pages} < required pages {required_pages}"
 			);
-			return Err(Error::AllocatorOutOfSpace)
+
+			if current_pages >= max_pages {
+				log::debug!(
+					target: LOG_TARGET,
+					"Wasm pages ({current_pages}) are already at the maximum.",
+				);
+
+				return Err(Error::AllocatorOutOfSpace)
+			} else if required_pages > max_pages {
+				log::debug!(
+					target: LOG_TARGET,
+					"Failed to grow memory from {current_pages} pages to at least {required_pages} \
+						 pages due to the maximum limit of {max_pages} pages",
+				);
+				return Err(Error::AllocatorOutOfSpace)
+			}
+
+			// Ideally we want to double our current number of pages,
+			// as long as it's less than the absolute maximum we can have.
+			let next_pages = min(current_pages * 2, max_pages);
+			// ...but if even more pages are required then try to allocate that many.
+			let next_pages = max(next_pages, required_pages);
+
+			if memory.grow(next_pages - current_pages).is_err() {
+				log::error!(
+					target: LOG_TARGET,
+					"Failed to grow memory from {current_pages} pages to {next_pages} pages",
+				);
+
+				return Err(Error::AllocatorOutOfSpace)
+			}
+
+			debug_assert_eq!(memory.pages(), next_pages, "Number of pages should have increased!");
 		}
 
 		let res = *bumper;
@@ -519,9 +562,9 @@ impl FreeingBumpHeapAllocator {
 		Ok(res)
 	}
 
-	fn observe_memory_size<M: Memory + ?Sized>(
-		last_observed_memory_size: &mut u32,
-		mem: &mut M,
+	fn observe_memory_size(
+		last_observed_memory_size: &mut u64,
+		mem: &mut impl Memory,
 	) -> Result<(), Error> {
 		if mem.size() < *last_observed_memory_size {
 			return Err(Error::MemoryShrinked)
@@ -538,38 +581,42 @@ impl FreeingBumpHeapAllocator {
 /// accessible up to the reported size.
 ///
 /// The linear memory can grow in size with the wasm page granularity (64KiB), but it cannot shrink.
-pub trait Memory {
+trait MemoryExt: Memory {
 	/// Read a u64 from the heap in LE form. Returns an error if any of the bytes read are out of
 	/// bounds.
-	fn read_le_u64(&self, ptr: u32) -> Result<u64, Error>;
-	/// Write a u64 to the heap in LE form. Returns an error if any of the bytes written are out of
-	/// bounds.
-	fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error>;
-	/// Returns the full size of the memory in bytes.
-	fn size(&self) -> u32;
-}
-
-impl Memory for [u8] {
 	fn read_le_u64(&self, ptr: u32) -> Result<u64, Error> {
-		let range =
-			heap_range(ptr, 8, self.len()).ok_or_else(|| error("read out of heap bounds"))?;
-		let bytes = self[range]
-			.try_into()
-			.expect("[u8] slice of length 8 must be convertible to [u8; 8]");
-		Ok(u64::from_le_bytes(bytes))
+		self.with_access(|memory| {
+			let range =
+				heap_range(ptr, 8, memory.len()).ok_or_else(|| error("read out of heap bounds"))?;
+			let bytes = memory[range]
+				.try_into()
+				.expect("[u8] slice of length 8 must be convertible to [u8; 8]");
+			Ok(u64::from_le_bytes(bytes))
+		})
 	}
+
+	/// Write a u64 to the heap in LE form. Returns an error if any of the bytes written are out of
+	/// bounds.
 	fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error> {
-		let range =
-			heap_range(ptr, 8, self.len()).ok_or_else(|| error("write out of heap bounds"))?;
-		let bytes = val.to_le_bytes();
-		self[range].copy_from_slice(&bytes[..]);
-		Ok(())
+		self.with_access_mut(|memory| {
+			let range = heap_range(ptr, 8, memory.len())
+				.ok_or_else(|| error("write out of heap bounds"))?;
+			let bytes = val.to_le_bytes();
+			memory[range].copy_from_slice(&bytes[..]);
+			Ok(())
+		})
 	}
-	fn size(&self) -> u32 {
-		u32::try_from(self.len()).expect("size of Wasm linear memory is <2^32; qed")
+
+	/// Returns the full size of the memory in bytes.
+	fn size(&self) -> u64 {
+		debug_assert!(self.pages() <= MAX_WASM_PAGES);
+
+		self.pages() as u64 * PAGE_SIZE as u64
 	}
 }
 
+impl<T: Memory> MemoryExt for T {}
+
 fn heap_range(offset: u32, length: u32, heap_len: usize) -> Option<Range<usize>> {
 	let start = offset as usize;
 	let end = offset.checked_add(length)? as usize;
@@ -601,21 +648,72 @@ impl<'a> Drop for PoisonBomb<'a> {
 mod tests {
 	use super::*;
 
-	const PAGE_SIZE: u32 = 65536;
-
 	/// Makes a pointer out of the given address.
 	fn to_pointer(address: u32) -> Pointer<u8> {
 		Pointer::new(address)
 	}
 
+	#[derive(Debug)]
+	struct MemoryInstance {
+		data: Vec<u8>,
+		max_wasm_pages: u32,
+	}
+
+	impl MemoryInstance {
+		fn with_pages(pages: u32) -> Self {
+			Self { data: vec![0; (pages * PAGE_SIZE) as usize], max_wasm_pages: MAX_WASM_PAGES }
+		}
+
+		fn set_max_wasm_pages(&mut self, max_pages: u32) {
+			self.max_wasm_pages = max_pages;
+		}
+	}
+
+	impl Memory for MemoryInstance {
+		fn with_access<R>(&self, run: impl FnOnce(&[u8]) -> R) -> R {
+			run(&self.data)
+		}
+
+		fn with_access_mut<R>(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R {
+			run(&mut self.data)
+		}
+
+		fn pages(&self) -> u32 {
+			pages_from_size(self.data.len() as u64).unwrap()
+		}
+
+		fn max_pages(&self) -> Option<u32> {
+			Some(self.max_wasm_pages)
+		}
+
+		fn grow(&mut self, pages: u32) -> Result<(), ()> {
+			if self.pages() + pages > self.max_wasm_pages {
+				Err(())
+			} else {
+				self.data.resize(((self.pages() + pages) * PAGE_SIZE) as usize, 0);
+				Ok(())
+			}
+		}
+	}
+
+	#[test]
+	fn test_pages_from_size() {
+		assert_eq!(pages_from_size(0).unwrap(), 0);
+		assert_eq!(pages_from_size(1).unwrap(), 1);
+		assert_eq!(pages_from_size(65536).unwrap(), 1);
+		assert_eq!(pages_from_size(65536 + 1).unwrap(), 2);
+		assert_eq!(pages_from_size(2 * 65536).unwrap(), 2);
+		assert_eq!(pages_from_size(2 * 65536 + 1).unwrap(), 3);
+	}
+
 	#[test]
 	fn should_allocate_properly() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
 		// when
-		let ptr = heap.allocate(&mut mem[..], 1).unwrap();
+		let ptr = heap.allocate(&mut mem, 1).unwrap();
 
 		// then
 		// returned pointer must start right after `HEADER_SIZE`
@@ -625,11 +723,11 @@ mod tests {
 	#[test]
 	fn should_always_align_pointers_to_multiples_of_8() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(13);
 
 		// when
-		let ptr = heap.allocate(&mut mem[..], 1).unwrap();
+		let ptr = heap.allocate(&mut mem, 1).unwrap();
 
 		// then
 		// the pointer must start at the next multiple of 8 from 13
@@ -640,13 +738,13 @@ mod tests {
 	#[test]
 	fn should_increment_pointers_properly() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
 		// when
-		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
-		let ptr2 = heap.allocate(&mut mem[..], 9).unwrap();
-		let ptr3 = heap.allocate(&mut mem[..], 1).unwrap();
+		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
+		let ptr2 = heap.allocate(&mut mem, 9).unwrap();
+		let ptr3 = heap.allocate(&mut mem, 1).unwrap();
 
 		// then
 		// a prefix of 8 bytes is prepended to each pointer
@@ -663,18 +761,18 @@ mod tests {
 	#[test]
 	fn should_free_properly() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
-		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
+		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
 		// the prefix of 8 bytes is prepended to the pointer
 		assert_eq!(ptr1, to_pointer(HEADER_SIZE));
 
-		let ptr2 = heap.allocate(&mut mem[..], 1).unwrap();
+		let ptr2 = heap.allocate(&mut mem, 1).unwrap();
 		// the prefix of 8 bytes + the content of ptr 1 is prepended to the pointer
 		assert_eq!(ptr2, to_pointer(24));
 
 		// when
-		heap.deallocate(&mut mem[..], ptr2).unwrap();
+		heap.deallocate(&mut mem, ptr2).unwrap();
 
 		// then
 		// then the heads table should contain a pointer to the
@@ -685,23 +783,23 @@ mod tests {
 	#[test]
 	fn should_deallocate_and_reallocate_properly() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let padded_offset = 16;
 		let mut heap = FreeingBumpHeapAllocator::new(13);
 
-		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
+		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
 		// the prefix of 8 bytes is prepended to the pointer
 		assert_eq!(ptr1, to_pointer(padded_offset + HEADER_SIZE));
 
-		let ptr2 = heap.allocate(&mut mem[..], 9).unwrap();
+		let ptr2 = heap.allocate(&mut mem, 9).unwrap();
 		// the padded_offset + the previously allocated ptr (8 bytes prefix +
 		// 8 bytes content) + the prefix of 8 bytes which is prepended to the
 		// current pointer
 		assert_eq!(ptr2, to_pointer(padded_offset + 16 + HEADER_SIZE));
 
 		// when
-		heap.deallocate(&mut mem[..], ptr2).unwrap();
-		let ptr3 = heap.allocate(&mut mem[..], 9).unwrap();
+		heap.deallocate(&mut mem, ptr2).unwrap();
+		let ptr3 = heap.allocate(&mut mem, 9).unwrap();
 
 		// then
 		// should have re-allocated
@@ -712,22 +810,22 @@ mod tests {
 	#[test]
 	fn should_build_linked_list_of_free_areas_properly() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
-		let ptr1 = heap.allocate(&mut mem[..], 8).unwrap();
-		let ptr2 = heap.allocate(&mut mem[..], 8).unwrap();
-		let ptr3 = heap.allocate(&mut mem[..], 8).unwrap();
+		let ptr1 = heap.allocate(&mut mem, 8).unwrap();
+		let ptr2 = heap.allocate(&mut mem, 8).unwrap();
+		let ptr3 = heap.allocate(&mut mem, 8).unwrap();
 
 		// when
-		heap.deallocate(&mut mem[..], ptr1).unwrap();
-		heap.deallocate(&mut mem[..], ptr2).unwrap();
-		heap.deallocate(&mut mem[..], ptr3).unwrap();
+		heap.deallocate(&mut mem, ptr1).unwrap();
+		heap.deallocate(&mut mem, ptr2).unwrap();
+		heap.deallocate(&mut mem, ptr3).unwrap();
 
 		// then
 		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr3) - HEADER_SIZE));
 
-		let ptr4 = heap.allocate(&mut mem[..], 8).unwrap();
+		let ptr4 = heap.allocate(&mut mem, 8).unwrap();
 		assert_eq!(ptr4, ptr3);
 
 		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
@@ -736,29 +834,28 @@ mod tests {
 	#[test]
 	fn should_not_allocate_if_too_large() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
+		mem.set_max_wasm_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(13);
 
 		// when
-		let ptr = heap.allocate(&mut mem[..], PAGE_SIZE - 13);
+		let ptr = heap.allocate(&mut mem, PAGE_SIZE - 13);
 
 		// then
-		match ptr.unwrap_err() {
-			Error::AllocatorOutOfSpace => {},
-			e => panic!("Expected allocator out of space error, got: {:?}", e),
-		}
+		assert_eq!(Error::AllocatorOutOfSpace, ptr.unwrap_err());
 	}
 
 	#[test]
 	fn should_not_allocate_if_full() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
+		mem.set_max_wasm_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
-		let ptr1 = heap.allocate(&mut mem[..], (PAGE_SIZE / 2) - HEADER_SIZE).unwrap();
+		let ptr1 = heap.allocate(&mut mem, (PAGE_SIZE / 2) - HEADER_SIZE).unwrap();
 		assert_eq!(ptr1, to_pointer(HEADER_SIZE));
 
 		// when
-		let ptr2 = heap.allocate(&mut mem[..], PAGE_SIZE / 2);
+		let ptr2 = heap.allocate(&mut mem, PAGE_SIZE / 2);
 
 		// then
 		// there is no room for another half page incl. its 8 byte prefix
@@ -771,11 +868,11 @@ mod tests {
 	#[test]
 	fn should_allocate_max_possible_allocation_size() {
 		// given
-		let mut mem = vec![0u8; (MAX_POSSIBLE_ALLOCATION + PAGE_SIZE) as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
 		// when
-		let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION).unwrap();
+		let ptr = heap.allocate(&mut mem, MAX_POSSIBLE_ALLOCATION).unwrap();
 
 		// then
 		assert_eq!(ptr, to_pointer(HEADER_SIZE));
@@ -784,60 +881,62 @@ mod tests {
 	#[test]
 	fn should_not_allocate_if_requested_size_too_large() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
 		// when
-		let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION + 1);
+		let ptr = heap.allocate(&mut mem, MAX_POSSIBLE_ALLOCATION + 1);
 
 		// then
-		match ptr.unwrap_err() {
-			Error::RequestedAllocationTooLarge => {},
-			e => panic!("Expected allocation size too large error, got: {:?}", e),
-		}
+		assert_eq!(Error::RequestedAllocationTooLarge, ptr.unwrap_err());
 	}
 
 	#[test]
 	fn should_return_error_when_bumper_greater_than_heap_size() {
 		// given
-		let mut mem = [0u8; 64];
+		let mut mem = MemoryInstance::with_pages(1);
+		mem.set_max_wasm_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
-		let ptr1 = heap.allocate(&mut mem[..], 32).unwrap();
-		assert_eq!(ptr1, to_pointer(HEADER_SIZE));
-		heap.deallocate(&mut mem[..], ptr1).expect("failed freeing ptr1");
-		assert_eq!(heap.stats.bytes_allocated, 0);
-		assert_eq!(heap.bumper, 40);
+		let mut ptrs = Vec::new();
+		for _ in 0..(PAGE_SIZE as usize / 40) {
+			ptrs.push(heap.allocate(&mut mem, 32).expect("Allocate 32 byte"));
+		}
+
+		assert_eq!(heap.stats.bytes_allocated, PAGE_SIZE - 16);
+		assert_eq!(heap.bumper, PAGE_SIZE - 16);
+
+		ptrs.into_iter()
+			.for_each(|ptr| heap.deallocate(&mut mem, ptr).expect("Deallocate 32 byte"));
 
-		let ptr2 = heap.allocate(&mut mem[..], 16).unwrap();
-		assert_eq!(ptr2, to_pointer(48));
-		heap.deallocate(&mut mem[..], ptr2).expect("failed freeing ptr2");
 		assert_eq!(heap.stats.bytes_allocated, 0);
-		assert_eq!(heap.bumper, 64);
+		assert_eq!(heap.stats.bytes_allocated_peak, PAGE_SIZE - 16);
+		assert_eq!(heap.bumper, PAGE_SIZE - 16);
+
+		// Allocate another 8 byte to use the full heap.
+		heap.allocate(&mut mem, 8).expect("Allocate 8 byte");
 
 		// when
 		// the `bumper` value is equal to `size` here and any
 		// further allocation which would increment the bumper must fail.
 		// we try to allocate 8 bytes here, which will increment the
-		// bumper since no 8 byte item has been allocated+freed before.
-		let ptr = heap.allocate(&mut mem[..], 8);
+		// bumper since no 8 byte item has been freed before.
+		assert_eq!(heap.bumper as u64, mem.size());
+		let ptr = heap.allocate(&mut mem, 8);
 
 		// then
-		match ptr.unwrap_err() {
-			Error::AllocatorOutOfSpace => {},
-			e => panic!("Expected allocator out of space error, got: {:?}", e),
-		}
+		assert_eq!(Error::AllocatorOutOfSpace, ptr.unwrap_err());
 	}
 
 	#[test]
 	fn should_include_prefixes_in_total_heap_size() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(1);
 
 		// when
 		// an item size of 16 must be used then
-		heap.allocate(&mut mem[..], 9).unwrap();
+		heap.allocate(&mut mem, 9).unwrap();
 
 		// then
 		assert_eq!(heap.stats.bytes_allocated, HEADER_SIZE + 16);
@@ -846,13 +945,13 @@ mod tests {
 	#[test]
 	fn should_calculate_total_heap_size_to_zero() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(13);
 
 		// when
-		let ptr = heap.allocate(&mut mem[..], 42).unwrap();
+		let ptr = heap.allocate(&mut mem, 42).unwrap();
 		assert_eq!(ptr, to_pointer(16 + HEADER_SIZE));
-		heap.deallocate(&mut mem[..], ptr).unwrap();
+		heap.deallocate(&mut mem, ptr).unwrap();
 
 		// then
 		assert_eq!(heap.stats.bytes_allocated, 0);
@@ -861,13 +960,13 @@ mod tests {
 	#[test]
 	fn should_calculate_total_size_of_zero() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(19);
 
 		// when
 		for _ in 1..10 {
-			let ptr = heap.allocate(&mut mem[..], 42).unwrap();
-			heap.deallocate(&mut mem[..], ptr).unwrap();
+			let ptr = heap.allocate(&mut mem, 42).unwrap();
+			heap.deallocate(&mut mem, ptr).unwrap();
 		}
 
 		// then
@@ -877,13 +976,13 @@ mod tests {
 	#[test]
 	fn should_read_and_write_u64_correctly() {
 		// given
-		let mut mem = [0u8; PAGE_SIZE as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 
 		// when
-		Memory::write_le_u64(mem.as_mut(), 40, 4480113).unwrap();
+		mem.write_le_u64(40, 4480113).unwrap();
 
 		// then
-		let value = Memory::read_le_u64(mem.as_mut(), 40).unwrap();
+		let value = MemoryExt::read_le_u64(&mut mem, 40).unwrap();
 		assert_eq!(value, 4480113);
 	}
 
@@ -913,24 +1012,25 @@ mod tests {
 
 	#[test]
 	fn deallocate_needs_to_maintain_linked_list() {
-		let mut mem = [0u8; 8 * 2 * 4 + ALIGNMENT as usize];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
 		// Allocate and free some pointers
-		let ptrs = (0..4).map(|_| heap.allocate(&mut mem[..], 8).unwrap()).collect::<Vec<_>>();
-		ptrs.into_iter().for_each(|ptr| heap.deallocate(&mut mem[..], ptr).unwrap());
+		let ptrs = (0..4).map(|_| heap.allocate(&mut mem, 8).unwrap()).collect::<Vec<_>>();
+		ptrs.iter().rev().for_each(|ptr| heap.deallocate(&mut mem, *ptr).unwrap());
 
-		// Second time we should be able to allocate all of them again.
-		let _ = (0..4).map(|_| heap.allocate(&mut mem[..], 8).unwrap()).collect::<Vec<_>>();
+		// Second time we should be able to allocate all of them again and get the same pointers!
+		let new_ptrs = (0..4).map(|_| heap.allocate(&mut mem, 8).unwrap()).collect::<Vec<_>>();
+		assert_eq!(ptrs, new_ptrs);
 	}
 
 	#[test]
 	fn header_read_write() {
 		let roundtrip = |header: Header| {
-			let mut memory = [0u8; 32];
-			header.write_into(memory.as_mut(), 0).unwrap();
+			let mut memory = MemoryInstance::with_pages(1);
+			header.write_into(&mut memory, 0).unwrap();
 
-			let read_header = Header::read_from(memory.as_mut(), 0).unwrap();
+			let read_header = Header::read_from(&memory, 0).unwrap();
 			assert_eq!(header, read_header);
 		};
 
@@ -944,18 +1044,18 @@ mod tests {
 	#[test]
 	fn poison_oom() {
 		// given
-		// a heap of 32 bytes. Should be enough for two allocations.
-		let mut mem = [0u8; 32];
+		let mut mem = MemoryInstance::with_pages(1);
+		mem.set_max_wasm_pages(1);
+
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
 		// when
-		assert!(heap.allocate(mem.as_mut(), 8).is_ok());
-		let alloc_ptr = heap.allocate(mem.as_mut(), 8).unwrap();
-		assert!(heap.allocate(mem.as_mut(), 8).is_err());
+		let alloc_ptr = heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
+		assert_eq!(Error::AllocatorOutOfSpace, heap.allocate(&mut mem, PAGE_SIZE).unwrap_err());
 
 		// then
 		assert!(heap.poisoned);
-		assert!(heap.deallocate(mem.as_mut(), alloc_ptr).is_err());
+		assert!(heap.deallocate(&mut mem, alloc_ptr).is_err());
 	}
 
 	#[test]
@@ -969,36 +1069,41 @@ mod tests {
 
 	#[test]
 	fn accepts_growing_memory() {
-		const ITEM_SIZE: u32 = 16;
-		const ITEM_ON_HEAP_SIZE: usize = 16 + HEADER_SIZE as usize;
-
-		let mut mem = vec![0u8; ITEM_ON_HEAP_SIZE * 2];
+		let mut mem = MemoryInstance::with_pages(1);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
-		let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap();
-		let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap();
+		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
+		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
 
-		mem.extend_from_slice(&[0u8; ITEM_ON_HEAP_SIZE]);
+		mem.grow(1).unwrap();
 
-		let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap();
+		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
 	}
 
 	#[test]
 	fn doesnt_accept_shrinking_memory() {
-		const ITEM_SIZE: u32 = 16;
-		const ITEM_ON_HEAP_SIZE: usize = 16 + HEADER_SIZE as usize;
-
-		let initial_size = ITEM_ON_HEAP_SIZE * 3;
-		let mut mem = vec![0u8; initial_size];
+		let mut mem = MemoryInstance::with_pages(2);
 		let mut heap = FreeingBumpHeapAllocator::new(0);
 
-		let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap();
+		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
 
-		mem.truncate(initial_size - 1);
+		mem.data.truncate(PAGE_SIZE as usize);
 
-		match heap.allocate(&mut mem[..], ITEM_SIZE).unwrap_err() {
+		match heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap_err() {
 			Error::MemoryShrinked => (),
 			_ => panic!(),
 		}
 	}
+
+	#[test]
+	fn should_grow_memory_when_running_out_of_memory() {
+		let mut mem = MemoryInstance::with_pages(1);
+		let mut heap = FreeingBumpHeapAllocator::new(0);
+
+		assert_eq!(1, mem.pages());
+
+		heap.allocate(&mut mem, PAGE_SIZE * 2).unwrap();
+
+		assert_eq!(3, mem.pages());
+	}
 }
diff --git a/client/allocator/src/lib.rs b/client/allocator/src/lib.rs
index 31179ab5ddede..e50d7d54c8e97 100644
--- a/client/allocator/src/lib.rs
+++ b/client/allocator/src/lib.rs
@@ -27,3 +27,35 @@ mod freeing_bump;
 
 pub use error::Error;
 pub use freeing_bump::{AllocationStats, FreeingBumpHeapAllocator};
+
+/// The size of one wasm page in bytes.
+///
+/// The wasm memory is divided into pages, meaning the minimum size of a memory is one page.
+const PAGE_SIZE: u32 = 65536;
+
+/// The maximum number of wasm pages that can be allocated.
+///
+/// 4GiB / [`PAGE_SIZE`].
+const MAX_WASM_PAGES: u32 = (4u64 * 1024 * 1024 * 1024 / PAGE_SIZE as u64) as u32;
+
+/// Grants access to the memory for the allocator.
+///
+/// Wasm memory is allocated in pages. A page has a constant size of 64KiB. The maximum allowed
+/// memory size as defined in the wasm specification is 4GiB (65536 pages).
+pub trait Memory {
+	/// Run the given closure `run` and grant it write access to the raw memory.
+	fn with_access_mut<R>(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R;
+	/// Run the given closure `run` and grant it read access to the raw memory.
+	fn with_access<R>(&self, run: impl FnOnce(&[u8]) -> R) -> R;
+	/// Grow the memory by `additional` pages.
+	fn grow(&mut self, additional: u32) -> Result<(), ()>;
+	/// Returns the current number of pages this memory has allocated.
+	fn pages(&self) -> u32;
+	/// Returns the maximum number of pages this memory is allowed to allocate.
+	///
+	/// The returned number needs to be less than or equal to `65536` and greater than or equal
+	/// to [`Self::pages`].
+	///
+	/// If `None` is returned, there is no maximum (besides the maximum defined in the wasm spec).
+	fn max_pages(&self) -> Option<u32>;
+}
diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs
index 42d3c4e3422c4..db8e4d8495af2 100644
--- a/client/api/src/call_executor.rs
+++ b/client/api/src/call_executor.rs
@@ -19,6 +19,7 @@
 //! A method call executor interface.
 
 use sc_executor::{RuntimeVersion, RuntimeVersionOf};
+use sp_core::traits::CallContext;
 use sp_runtime::traits::Block as BlockT;
 use sp_state_machine::{ExecutionStrategy, OverlayedChanges, StorageProof};
 use std::cell::RefCell;
@@ -58,6 +59,7 @@ pub trait CallExecutor<B: BlockT>: RuntimeVersionOf {
 		method: &str,
 		call_data: &[u8],
 		strategy: ExecutionStrategy,
+		context: CallContext,
 	) -> Result<Vec<u8>, sp_blockchain::Error>;
 
 	/// Execute a contextual call on top of state in a block of a given hash.
diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml
index 46b72565afefc..21a9bd70dde65 100644
--- a/client/executor/Cargo.toml
+++ b/client/executor/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 lru = "0.8.1"
 parking_lot = "0.12.1"
 tracing = "0.1.29"
-wasmi = "0.13"
+wasmi = "0.13.2"
 
 codec = { package = "parity-scale-codec", version = "3.2.2" }
 sc-executor-common = { version = "0.10.0-dev", path = "common" }
@@ -43,6 +43,7 @@ sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine"
 sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
 sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" }
 sc-tracing = { version = "4.0.0-dev", path = "../tracing" }
+sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" }
 tracing-subscriber = "0.2.19"
 paste = "1.0"
 regex = "1.6.0"
diff --git a/client/executor/benches/bench.rs b/client/executor/benches/bench.rs
index 62e8b261f4786..10425ea461c21 100644
--- a/client/executor/benches/bench.rs
+++ b/client/executor/benches/bench.rs
@@ -21,7 +21,7 @@ use codec::Encode;
 
 use sc_executor_common::{
 	runtime_blob::RuntimeBlob,
-	wasm_runtime::{WasmInstance, WasmModule},
+	wasm_runtime::{HeapAllocStrategy, WasmInstance, WasmModule},
 };
 use sc_executor_wasmtime::InstantiationStrategy;
 use sc_runtime_test::wasm_binary_unwrap as test_runtime;
@@ -51,13 +51,13 @@ fn initialize(
 ) -> Arc<dyn WasmModule> {
 	let blob = RuntimeBlob::uncompress_if_needed(runtime).unwrap();
 	let host_functions = sp_io::SubstrateHostFunctions::host_functions();
-	let heap_pages = 2048;
+	let extra_pages = 2048;
 	let allow_missing_func_imports = true;
 
 	match method {
 		Method::Interpreted => sc_executor_wasmi::create_runtime(
 			blob,
-			heap_pages,
+			HeapAllocStrategy::Static { extra_pages },
 			host_functions,
 			allow_missing_func_imports,
 		)
@@ -67,12 +67,11 @@ fn initialize(
 				allow_missing_func_imports,
 				cache_path: None,
 				semantics: sc_executor_wasmtime::Semantics {
-					extra_heap_pages: heap_pages,
+					heap_alloc_strategy: HeapAllocStrategy::Static { extra_pages },
 					instantiation_strategy,
 					deterministic_stack_limit: None,
 					canonicalize_nans: false,
 					parallel_compilation: true,
-					max_memory_size: None,
 				},
 			};
 
diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml
index 648e937d371ff..dd74ea2cfd209 100644
--- a/client/executor/common/Cargo.toml
+++ b/client/executor/common/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 thiserror = "1.0.30"
 wasm-instrument = "0.3"
-wasmi = "0.13"
+wasmi = "0.13.2"
 sc-allocator = { version = "4.1.0-dev", path = "../../allocator" }
 sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" }
 sp-wasm-interface = { version = "7.0.0", path = "../../../primitives/wasm-interface" }
diff --git a/client/executor/common/src/runtime_blob/runtime_blob.rs b/client/executor/common/src/runtime_blob/runtime_blob.rs
index dc0cde8d26974..24dc7e393a4bc 100644
--- a/client/executor/common/src/runtime_blob/runtime_blob.rs
+++ b/client/executor/common/src/runtime_blob/runtime_blob.rs
@@ -16,7 +16,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use crate::error::WasmError;
+use crate::{error::WasmError, wasm_runtime::HeapAllocStrategy};
 use wasm_instrument::{
 	export_mutable_globals,
 	parity_wasm::elements::{
@@ -157,18 +157,13 @@ impl RuntimeBlob {
 		Ok(())
 	}
 
-	/// Increases the number of memory pages requested by the WASM blob by
-	/// the given amount of `extra_heap_pages`.
+	/// Modifies the blob's memory section according to the given `heap_alloc_strategy`.
 	///
 	/// Will return an error in case there is no memory section present,
 	/// or if the memory section is empty.
-	///
-	/// Only modifies the initial size of the memory; the maximum is unmodified
-	/// unless it's smaller than the initial size, in which case it will be increased
-	/// so that it's at least as big as the initial size.
-	pub fn add_extra_heap_pages_to_memory_section(
+	pub fn setup_memory_according_to_heap_alloc_strategy(
 		&mut self,
-		extra_heap_pages: u32,
+		heap_alloc_strategy: HeapAllocStrategy,
 	) -> Result<(), WasmError> {
 		let memory_section = self
 			.raw_module
@@ -179,8 +174,17 @@ impl RuntimeBlob {
 			return Err(WasmError::Other("memory section is empty".into()))
 		}
 		for memory_ty in memory_section.entries_mut() {
-			let min = memory_ty.limits().initial().saturating_add(extra_heap_pages);
-			let max = memory_ty.limits().maximum().map(|max| std::cmp::max(min, max));
+			let initial = memory_ty.limits().initial();
+			let (min, max) = match heap_alloc_strategy {
+				HeapAllocStrategy::Dynamic { maximum_pages } => {
+					// Ensure `initial <= maximum_pages`
+					(maximum_pages.map(|m| m.min(initial)).unwrap_or(initial), maximum_pages)
+				},
+				HeapAllocStrategy::Static { extra_pages } => {
+					let pages = initial.saturating_add(extra_pages);
+					(pages, Some(pages))
+				},
+			};
 			*memory_ty = MemoryType::new(min, max);
 		}
 		Ok(())
diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs
index e672861cb6f3c..34967f86595d6 100644
--- a/client/executor/common/src/util.rs
+++ b/client/executor/common/src/util.rs
@@ -26,11 +26,7 @@ use std::ops::Range;
 /// Returns None if the end of the range would exceed some maximum offset.
 pub fn checked_range(offset: usize, len: usize, max: usize) -> Option<Range<usize>> {
 	let end = offset.checked_add(len)?;
-	if end <= max {
-		Some(offset..end)
-	} else {
-		None
-	}
+	(end <= max).then(|| offset..end)
 }
 
 /// Provides safe memory access interface using an external buffer
diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs
index 5899252068753..e3db7e52fc7e7 100644
--- a/client/executor/common/src/wasm_runtime.rs
+++ b/client/executor/common/src/wasm_runtime.rs
@@ -119,3 +119,29 @@ pub trait WasmInstance: Send {
 		None
 	}
 }
+
+/// Defines the heap pages allocation strategy the wasm runtime should use.
+///
+/// A heap page is defined as 64KiB of memory.
+#[derive(Debug, Copy, Clone, PartialEq, Hash, Eq)]
+pub enum HeapAllocStrategy {
+	/// Allocate a static number of heap pages.
+	///
+	/// The total number of allocated heap pages is the initial number of heap pages requested by
+	/// the wasm file plus the `extra_pages`.
+	Static {
+		/// The number of pages that will be added on top of the initial heap pages requested by
+		/// the wasm file.
+		extra_pages: u32,
+	},
+	/// Allocate the initial heap pages as requested by the wasm file and then allow it to grow
+	/// dynamically.
+	Dynamic {
+		/// The absolute maximum size of the linear memory (in pages).
+		///
+		/// When `Some(_)` the linear memory will be allowed to grow up to this limit.
+		/// When `None` the linear memory will be allowed to grow up to the maximum limit supported
+		/// by WASM (4GB).
+		maximum_pages: Option<u32>,
+	},
+}
diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs
index 23584006a72c0..2bd2aeeb606eb 100644
--- a/client/executor/runtime-test/src/lib.rs
+++ b/client/executor/runtime-test/src/lib.rs
@@ -328,6 +328,15 @@ sp_core::wasm_export_functions! {
 		assert_eq!(value, -66);
 	}
 
+	fn allocate_two_gigabyte() -> u32 {
+		let mut data = Vec::new();
+		for _ in 0..205 {
+			data.push(Vec::<u8>::with_capacity(10 * 1024 * 1024));
+		}
+
+		data.iter().map(|d| d.capacity() as u32).sum()
+	}
+
 	fn test_abort_on_panic() {
 		sp_io::panic_handler::abort_on_panic("test_abort_on_panic called");
 	}
diff --git a/client/executor/src/integration_tests/linux.rs b/client/executor/src/integration_tests/linux.rs
index 9afe37037eb2f..38d75da4eb7e8 100644
--- a/client/executor/src/integration_tests/linux.rs
+++ b/client/executor/src/integration_tests/linux.rs
@@ -21,25 +21,60 @@
 use super::mk_test_runtime;
 use crate::WasmExecutionMethod;
 use codec::Encode as _;
+use sc_executor_common::wasm_runtime::HeapAllocStrategy;
 
 mod smaps;
 
 use self::smaps::Smaps;
 
+#[test]
+fn memory_consumption_interpreted() {
+	let _ = sp_tracing::try_init_simple();
+
+	if std::env::var("RUN_TEST").is_ok() {
+		memory_consumption(WasmExecutionMethod::Interpreted);
+	} else {
+		// We need to run the test in isolation so that it is not affected by the other tests.
+		let executable = std::env::current_exe().unwrap();
+		let output = std::process::Command::new(executable)
+			.env("RUN_TEST", "1")
+			.args(&["--nocapture", "memory_consumption_interpreted"])
+			.output()
+			.unwrap();
+
+		assert!(output.status.success());
+	}
+}
+
 #[test]
 fn memory_consumption_compiled() {
+	let _ = sp_tracing::try_init_simple();
+
+	if std::env::var("RUN_TEST").is_ok() {
+		memory_consumption(WasmExecutionMethod::Compiled {
+			instantiation_strategy:
+				sc_executor_wasmtime::InstantiationStrategy::LegacyInstanceReuse,
+		});
+	} else {
+		// We need to run the test in isolation so that it is not affected by the other tests.
+		let executable = std::env::current_exe().unwrap();
+		let status = std::process::Command::new(executable)
+			.env("RUN_TEST", "1")
+			.args(&["--nocapture", "memory_consumption_compiled"])
+			.status()
+			.unwrap();
+
+		assert!(status.success());
+	}
+}
+
+fn memory_consumption(wasm_method: WasmExecutionMethod) {
 	// This aims to see if linear memory stays backed by the physical memory after a runtime call.
 	//
 	// For that we make a series of runtime calls, probing the RSS for the VMA matching the linear
 	// memory. After the call we expect RSS to be equal to 0.
 
-	let runtime = mk_test_runtime(
-		WasmExecutionMethod::Compiled {
-			instantiation_strategy:
-				sc_executor_wasmtime::InstantiationStrategy::LegacyInstanceReuse,
-		},
-		1024,
-	);
+	let runtime = mk_test_runtime(wasm_method, HeapAllocStrategy::Static { extra_pages: 1024 });
 
 	let mut instance = runtime.new_instance().unwrap();
 	let heap_base = instance
diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs
index 324c8d515d14a..066b1497fb6ec 100644
--- a/client/executor/src/integration_tests/mod.rs
+++ b/client/executor/src/integration_tests/mod.rs
@@ -24,7 +24,7 @@ use codec::{Decode, Encode};
 use sc_executor_common::{
 	error::{Error, WasmError},
 	runtime_blob::RuntimeBlob,
-	wasm_runtime::WasmModule,
+	wasm_runtime::{HeapAllocStrategy, WasmModule},
 };
 use sc_runtime_test::wasm_binary_unwrap;
 use sp_core::{
@@ -52,11 +52,13 @@ macro_rules! test_wasm_execution {
 		paste::item! {
 			#[test]
 			fn [<$method_name _interpreted>]() {
+				let _ = sp_tracing::try_init_simple();
 				$method_name(WasmExecutionMethod::Interpreted);
 			}
 
 			#[test]
 			fn [<$method_name _compiled_recreate_instance_cow>]() {
+				let _ = sp_tracing::try_init_simple();
 				$method_name(WasmExecutionMethod::Compiled {
 					instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::RecreateInstanceCopyOnWrite
 				});
@@ -64,6 +66,7 @@ macro_rules! test_wasm_execution {
 
 			#[test]
 			fn [<$method_name _compiled_recreate_instance_vanilla>]() {
+				let _ = sp_tracing::try_init_simple();
 				$method_name(WasmExecutionMethod::Compiled {
 					instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::RecreateInstance
 				});
@@ -71,6 +74,7 @@ macro_rules! test_wasm_execution {
 
 			#[test]
 			fn [<$method_name _compiled_pooling_cow>]() {
+				let _ = sp_tracing::try_init_simple();
 				$method_name(WasmExecutionMethod::Compiled {
 					instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::PoolingCopyOnWrite
 				});
@@ -78,6 +82,7 @@ macro_rules! test_wasm_execution {
 
 			#[test]
 			fn [<$method_name _compiled_pooling_vanilla>]() {
+				let _ = sp_tracing::try_init_simple();
 				$method_name(WasmExecutionMethod::Compiled {
 					instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::Pooling
 				});
@@ -85,6 +90,7 @@ macro_rules! test_wasm_execution {
 
 			#[test]
 			fn [<$method_name _compiled_legacy_instance_reuse>]() {
+				let _ = sp_tracing::try_init_simple();
 				$method_name(WasmExecutionMethod::Compiled {
 					instantiation_strategy: sc_executor_wasmtime::InstantiationStrategy::LegacyInstanceReuse
 				});
@@ -474,7 +480,10 @@ fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) {
 	}
 }
 
-fn mk_test_runtime(wasm_method: WasmExecutionMethod, pages: u64) -> Arc<dyn WasmModule> {
+fn mk_test_runtime(
+	wasm_method: WasmExecutionMethod,
+	pages: HeapAllocStrategy,
+) -> Arc<dyn WasmModule> {
 	let blob = RuntimeBlob::uncompress_if_needed(wasm_binary_unwrap())
 		.expect("failed to create a runtime blob out of test runtime");
 
@@ -490,7 +499,8 @@ fn mk_test_runtime(wasm_method: WasmExecutionMethod, pages: u64) -> Arc<dyn Wasm
 
 test_wasm_execution!(returns_mutable_static);
 fn returns_mutable_static(wasm_method: WasmExecutionMethod) {
-	let runtime = mk_test_runtime(wasm_method, 1024);
+	let runtime =
+		mk_test_runtime(wasm_method, HeapAllocStrategy::Dynamic { maximum_pages: Some(1024) });
 
 	let mut instance = runtime.new_instance().unwrap();
 	let res = instance.call_export("returns_mutable_static", &[0]).unwrap();
@@ -505,7 +515,8 @@ fn returns_mutable_static(wasm_method: WasmExecutionMethod) {
 
 test_wasm_execution!(returns_mutable_static_bss);
 fn returns_mutable_static_bss(wasm_method: WasmExecutionMethod) {
-	let runtime = mk_test_runtime(wasm_method, 1024);
+	let runtime =
+		mk_test_runtime(wasm_method, HeapAllocStrategy::Dynamic { maximum_pages: Some(1024) });
 
 	let mut instance = runtime.new_instance().unwrap();
 	let res = instance.call_export("returns_mutable_static_bss", &[0]).unwrap();
@@ -530,9 +541,12 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) {
 	//
 	// The fixture performs 2 allocations of 768KB and this theoretically gives 1536KB, however, due
 	// to our allocator algorithm there are inefficiencies.
-	const REQUIRED_MEMORY_PAGES: u64 = 32;
+	const REQUIRED_MEMORY_PAGES: u32 = 32;
 
-	let runtime = mk_test_runtime(wasm_method, REQUIRED_MEMORY_PAGES);
+	let runtime = mk_test_runtime(
+		wasm_method,
+		HeapAllocStrategy::Static { extra_pages: REQUIRED_MEMORY_PAGES },
+	);
 	let mut instance = runtime.new_instance().unwrap();
 
 	// On the first invocation we allocate approx. 768KB (75%) of stack and then trap.
@@ -546,7 +560,7 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) {
 
 test_wasm_execution!(interpreted_only heap_is_reset_between_calls);
 fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) {
-	let runtime = mk_test_runtime(wasm_method, 1024);
+	let runtime = mk_test_runtime(wasm_method, HeapAllocStrategy::Static { extra_pages: 1024 });
 	let mut instance = runtime.new_instance().unwrap();
 
 	let heap_base = instance
@@ -651,6 +665,15 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) {
 	assert_eq!(len, 2);
 }
 
+test_wasm_execution!(allocate_two_gigabyte);
+fn allocate_two_gigabyte(wasm_method: WasmExecutionMethod) {
+	let runtime = mk_test_runtime(wasm_method, HeapAllocStrategy::Dynamic { maximum_pages: None });
+
+	let mut instance = runtime.new_instance().unwrap();
+	let res = instance.call_export("allocate_two_gigabyte", &[0]).unwrap();
+	assert_eq!(10 * 1024 * 1024 * 205, u32::decode(&mut &res[..]).unwrap());
+}
+
 test_wasm_execution!(memory_is_cleared_between_invocations);
 fn memory_is_cleared_between_invocations(wasm_method: WasmExecutionMethod) {
 	// This is based on the code generated by compiling a runtime *without*
@@ -713,7 +736,7 @@ fn memory_is_cleared_between_invocations(wasm_method: WasmExecutionMethod) {
 
 	let runtime = crate::wasm_runtime::create_wasm_runtime_with_code::<HostFunctions>(
 		wasm_method,
-		1024,
+		HeapAllocStrategy::Dynamic { maximum_pages: Some(1024) },
 		RuntimeBlob::uncompress_if_needed(&binary[..]).unwrap(),
 		true,
 		None,
diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs
index a2af97ae4eacd..c72cf3c9c91df 100644
--- a/client/executor/src/native_executor.rs
+++ b/client/executor/src/native_executor.rs
@@ -32,14 +32,15 @@ use std::{
 use codec::Encode;
 use sc_executor_common::{
 	runtime_blob::RuntimeBlob,
-	wasm_runtime::{AllocationStats, WasmInstance, WasmModule},
+	wasm_runtime::{AllocationStats, HeapAllocStrategy, WasmInstance, WasmModule},
 };
-use sp_core::traits::{CodeExecutor, Externalities, RuntimeCode};
+use sp_core::traits::{CallContext, CodeExecutor, Externalities, RuntimeCode};
 use sp_version::{GetNativeVersion, NativeVersion, RuntimeVersion};
 use sp_wasm_interface::{ExtendedHostFunctions, HostFunctions};
 
-/// Default num of pages for the heap
-const DEFAULT_HEAP_PAGES: u64 = 2048;
+/// Default heap allocation strategy.
+const DEFAULT_HEAP_ALLOC_STRATEGY: HeapAllocStrategy =
+	HeapAllocStrategy::Static { extra_pages: 2048 };
 
 /// Set up the externalities and safe calling environment to execute runtime calls.
 ///
@@ -79,13 +80,136 @@ pub trait NativeExecutionDispatch: Send + Sync {
 	fn native_version() -> NativeVersion;
 }
 
+fn unwrap_heap_pages(pages: Option<HeapAllocStrategy>) -> HeapAllocStrategy {
+	pages.unwrap_or_else(|| DEFAULT_HEAP_ALLOC_STRATEGY)
+}
+
+/// Builder for creating a [`WasmExecutor`] instance.
+pub struct WasmExecutorBuilder<H> {
+	_phantom: PhantomData<H>,
+	method: WasmExecutionMethod,
+	onchain_heap_alloc_strategy: Option<HeapAllocStrategy>,
+	offchain_heap_alloc_strategy: Option<HeapAllocStrategy>,
+	max_runtime_instances: usize,
+	cache_path: Option<PathBuf>,
+	allow_missing_host_functions: bool,
+	runtime_cache_size: u8,
+}
+
+impl<H> WasmExecutorBuilder<H> {
+	/// Create a new instance of `Self`
+	///
+	/// - `method`: The wasm execution method that should be used by the executor.
+	pub fn new(method: WasmExecutionMethod) -> Self {
+		Self {
+			_phantom: PhantomData,
+			method,
+			onchain_heap_alloc_strategy: None,
+			offchain_heap_alloc_strategy: None,
+			max_runtime_instances: 2,
+			runtime_cache_size: 4,
+			allow_missing_host_functions: false,
+			cache_path: None,
+		}
+	}
+
+	/// Create the wasm executor with the given `heap_alloc_strategy` for onchain runtime
+	/// calls.
+	pub fn with_onchain_heap_alloc_strategy(
+		mut self,
+		heap_alloc_strategy: HeapAllocStrategy,
+	) -> Self {
+		self.onchain_heap_alloc_strategy = Some(heap_alloc_strategy);
+		self
+	}
+
+	/// Create the wasm executor with the given `heap_alloc_strategy` for offchain runtime
+	/// calls.
+	pub fn with_offchain_heap_alloc_strategy(
+		mut self,
+		heap_alloc_strategy: HeapAllocStrategy,
+	) -> Self {
+		self.offchain_heap_alloc_strategy = Some(heap_alloc_strategy);
+		self
+	}
+
+	/// Create the wasm executor with the given maximum number of `instances`.
+	///
+	/// The number of `instances` defines how many different instances of a runtime the cache is
+	/// storing.
+	///
+	/// By default the maximum number of `instances` is `2`.
+	pub fn with_max_runtime_instances(mut self, instances: usize) -> Self {
+		self.max_runtime_instances = instances;
+		self
+	}
+
+	/// Create the wasm executor with the given `cache_path`.
+	///
+	/// The `cache_path` is a path to a directory where the executor can place its files for
+	/// caching purposes. This may be important in cases where there are many different modules
+	/// and the compiled execution method is used.
+	///
+	/// By default there is no `cache_path` given.
+	pub fn with_cache_path(mut self, cache_path: impl Into<PathBuf>) -> Self {
+		self.cache_path = Some(cache_path.into());
+		self
+	}
+
+	/// Create the wasm executor and allow/forbid missing host functions.
+	///
+	/// If missing host functions are forbidden, instantiation of a wasm blob will fail if it
+	/// imports host functions that the executor is not aware of. If they are allowed,
+	/// a stub is generated that will return an error when called while executing the wasm.
+	///
+	/// By default missing host functions are forbidden.
+	pub fn with_allow_missing_host_functions(mut self, allow: bool) -> Self {
+		self.allow_missing_host_functions = allow;
+		self
+	}
+
+	/// Create the wasm executor with the given `runtime_cache_size`.
+	///
+	/// Defines the number of different runtimes/instantiated wasm blobs the cache stores.
+	/// Runtimes/wasm blobs are differentiated by their code hash and heap allocation strategy.
+	///
+	/// By default this value is set to `4`.
+	pub fn with_runtime_cache_size(mut self, runtime_cache_size: u8) -> Self {
+		self.runtime_cache_size = runtime_cache_size;
+		self
+	}
+
+	/// Build the configured [`WasmExecutor`].
+	pub fn build(self) -> WasmExecutor<H> {
+		WasmExecutor {
+			method: self.method,
+			default_offchain_heap_alloc_strategy: unwrap_heap_pages(
+				self.offchain_heap_alloc_strategy,
+			),
+			default_onchain_heap_alloc_strategy: unwrap_heap_pages(
+				self.onchain_heap_alloc_strategy,
+			),
+			cache: Arc::new(RuntimeCache::new(
+				self.max_runtime_instances,
+				self.cache_path.clone(),
+				self.runtime_cache_size,
+			)),
+			cache_path: self.cache_path,
+			allow_missing_host_functions: self.allow_missing_host_functions,
+			phantom: PhantomData,
+		}
+	}
+}
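As an illustration of the new builder API, a typical construction might look like the following. This is a usage sketch only; the concrete host-function set and heap strategies depend on the caller:

	// Usage sketch; `sp_io::SubstrateHostFunctions` is just one possible host-function set.
	let executor = WasmExecutor::<sp_io::SubstrateHostFunctions>::builder(WasmExecutionMethod::Interpreted)
		.with_onchain_heap_alloc_strategy(HeapAllocStrategy::Dynamic { maximum_pages: Some(4096) })
		.with_offchain_heap_alloc_strategy(HeapAllocStrategy::Dynamic { maximum_pages: None })
		.with_max_runtime_instances(8)
		.with_runtime_cache_size(2)
		.build();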
+
 /// An abstraction over Wasm code executor. Supports selecting execution backend and
 /// manages runtime cache.
 pub struct WasmExecutor<H> {
 	/// Method used to execute fallback Wasm code.
 	method: WasmExecutionMethod,
-	/// The number of 64KB pages to allocate for Wasm execution.
-	default_heap_pages: u64,
+	/// The heap allocation strategy for onchain Wasm calls.
+	default_onchain_heap_alloc_strategy: HeapAllocStrategy,
+	/// The heap allocation strategy for offchain Wasm calls.
+	default_offchain_heap_alloc_strategy: HeapAllocStrategy,
 	/// WASM runtime cache.
 	cache: Arc<RuntimeCache>,
 	/// The path to a directory which the executor can leverage for a file cache, e.g. put there
@@ -100,7 +224,8 @@ impl<H> Clone for WasmExecutor<H> {
 	fn clone(&self) -> Self {
 		Self {
 			method: self.method,
-			default_heap_pages: self.default_heap_pages,
+			default_onchain_heap_alloc_strategy: self.default_onchain_heap_alloc_strategy,
+			default_offchain_heap_alloc_strategy: self.default_offchain_heap_alloc_strategy,
 			cache: self.cache.clone(),
 			cache_path: self.cache_path.clone(),
 			allow_missing_host_functions: self.allow_missing_host_functions,
@@ -119,8 +244,10 @@ where
 	///
 	/// `method` - Method used to execute Wasm code.
 	///
-	/// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution.
-	///   Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided.
+	/// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. Internally this
+	/// will be mapped to [`HeapAllocStrategy::Static`] where `default_heap_pages` represents the
+	/// static number of heap pages to allocate. Defaults to `DEFAULT_HEAP_ALLOC_STRATEGY` if `None`
+	/// is provided.
 	///
 	/// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse.
 	///
@@ -138,7 +265,12 @@ where
 	) -> Self {
 		WasmExecutor {
 			method,
-			default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES),
+			default_onchain_heap_alloc_strategy: unwrap_heap_pages(
+				default_heap_pages.map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }),
+			),
+			default_offchain_heap_alloc_strategy: unwrap_heap_pages(
+				default_heap_pages.map(|h| HeapAllocStrategy::Static { extra_pages: h as _ }),
+			),
 			cache: Arc::new(RuntimeCache::new(
 				max_runtime_instances,
 				cache_path.clone(),
@@ -150,6 +282,11 @@ where
 		}
 	}
 
+	/// Instantiate a builder for creating an instance of `Self`.
+	pub fn builder(method: WasmExecutionMethod) -> WasmExecutorBuilder<H> {
+		WasmExecutorBuilder::new(method)
+	}
+
 	/// Ignore missing function imports if set true.
 	pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) {
 		self.allow_missing_host_functions = allow_missing_host_functions
@@ -172,6 +309,7 @@ where
 		&self,
 		runtime_code: &RuntimeCode,
 		ext: &mut dyn Externalities,
+		heap_alloc_strategy: HeapAllocStrategy,
 		f: F,
 	) -> Result<R>
 	where
@@ -186,7 +324,7 @@ where
 			runtime_code,
 			ext,
 			self.method,
-			self.default_heap_pages,
+			heap_alloc_strategy,
 			self.allow_missing_host_functions,
 			|module, instance, version, ext| {
 				let module = AssertUnwindSafe(module);
@@ -259,7 +397,7 @@ where
 	) -> std::result::Result<Vec<u8>, Error> {
 		let module = crate::wasm_runtime::create_wasm_runtime_with_code::<H>(
 			self.method,
-			self.default_heap_pages,
+			self.default_onchain_heap_alloc_strategy,
 			runtime_blob,
 			allow_missing_host_functions,
 			self.cache_path.as_deref(),
@@ -334,6 +472,7 @@ where
 		method: &str,
 		data: &[u8],
 		_use_native: bool,
+		context: CallContext,
 	) -> (Result<Vec<u8>>, bool) {
 		tracing::trace!(
 			target: "executor",
@@ -341,10 +480,25 @@ where
 			"Executing function",
 		);
 
-		let result =
-			self.with_instance(runtime_code, ext, |_, mut instance, _onchain_version, mut ext| {
+		let on_chain_heap_alloc_strategy = runtime_code
+			.heap_pages
+			.map(|h| HeapAllocStrategy::Static { extra_pages: h as _ })
+			.unwrap_or_else(|| self.default_onchain_heap_alloc_strategy);
+
+		let heap_alloc_strategy = match context {
+			CallContext::Offchain => self.default_offchain_heap_alloc_strategy,
+			CallContext::Onchain => on_chain_heap_alloc_strategy,
+		};
+
+		let result = self.with_instance(
+			runtime_code,
+			ext,
+			heap_alloc_strategy,
+			|_, mut instance, _onchain_version, mut ext| {
 				with_externalities_safe(&mut **ext, move || instance.call_export(method, data))
-			});
+			},
+		);
+
 		(result, false)
 	}
 }
@@ -358,20 +512,25 @@ where
 		ext: &mut dyn Externalities,
 		runtime_code: &RuntimeCode,
 	) -> Result<RuntimeVersion> {
-		self.with_instance(runtime_code, ext, |_module, _instance, version, _ext| {
-			Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into())))
-		})
+		let on_chain_heap_pages = runtime_code
+			.heap_pages
+			.map(|h| HeapAllocStrategy::Static { extra_pages: h as _ })
+			.unwrap_or_else(|| self.default_onchain_heap_alloc_strategy);
+
+		self.with_instance(
+			runtime_code,
+			ext,
+			on_chain_heap_pages,
+			|_module, _instance, version, _ext| {
+				Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into())))
+			},
+		)
 	}
 }
 
 /// A generic `CodeExecutor` implementation that uses a delegate to determine wasm code equivalence
 /// and dispatch to native code when possible, falling back on `WasmExecutor` when not.
-pub struct NativeElseWasmExecutor<D>
-where
-	D: NativeExecutionDispatch,
-{
-	/// Dummy field to avoid the compiler complaining about us not using `D`.
-	_dummy: PhantomData<D>,
+pub struct NativeElseWasmExecutor<D: NativeExecutionDispatch> {
 	/// Native runtime version info.
 	native_version: NativeVersion,
 	/// Fallback wasm executor.
@@ -386,8 +545,10 @@ impl<D: NativeExecutionDispatch> NativeElseWasmExecutor<D> {
 	///
 	/// `fallback_method` - Method used to execute fallback Wasm code.
 	///
-	/// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution.
-	/// 	Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided.
+	/// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. Internally this
+	/// will be mapped to [`HeapAllocStrategy::Static`] where `default_heap_pages` represents the
+	/// static number of heap pages to allocate. Defaults to `DEFAULT_HEAP_ALLOC_STRATEGY` if `None`
+	/// is provided.
 	///
 	/// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse.
 	///
@@ -406,11 +567,16 @@ impl<D: NativeExecutionDispatch> NativeElseWasmExecutor<D> {
 			runtime_cache_size,
 		);
 
-		NativeElseWasmExecutor {
-			_dummy: Default::default(),
-			native_version: D::native_version(),
-			wasm,
-		}
+		NativeElseWasmExecutor { native_version: D::native_version(), wasm }
+	}
+
+	/// Create a new instance using the given [`WasmExecutor`].
+	pub fn new_with_wasm_executor(
+		executor: WasmExecutor<
+			ExtendedHostFunctions<sp_io::SubstrateHostFunctions, D::ExtendHostFunctions>,
+		>,
+	) -> Self {
+		Self { native_version: D::native_version(), wasm: executor }
 	}
 
 	/// Ignore missing function imports if set true.
@@ -425,9 +591,7 @@ impl<D: NativeExecutionDispatch> RuntimeVersionOf for NativeElseWasmExecutor<D>
 		ext: &mut dyn Externalities,
 		runtime_code: &RuntimeCode,
 	) -> Result<RuntimeVersion> {
-		self.wasm.with_instance(runtime_code, ext, |_module, _instance, version, _ext| {
-			Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into())))
-		})
+		self.wasm.runtime_version(ext, runtime_code)
 	}
 }
 
@@ -447,6 +611,7 @@ impl<D: NativeExecutionDispatch + 'static> CodeExecutor for NativeElseWasmExecut
 		method: &str,
 		data: &[u8],
 		use_native: bool,
+		context: CallContext,
 	) -> (Result<Vec<u8>>, bool) {
 		tracing::trace!(
 			target: "executor",
@@ -454,10 +619,21 @@ impl<D: NativeExecutionDispatch + 'static> CodeExecutor for NativeElseWasmExecut
 			"Executing function",
 		);
 
+		let on_chain_heap_alloc_strategy = runtime_code
+			.heap_pages
+			.map(|h| HeapAllocStrategy::Static { extra_pages: h as _ })
+			.unwrap_or_else(|| self.wasm.default_onchain_heap_alloc_strategy);
+
+		let heap_alloc_strategy = match context {
+			CallContext::Offchain => self.wasm.default_offchain_heap_alloc_strategy,
+			CallContext::Onchain => on_chain_heap_alloc_strategy,
+		};
+
 		let mut used_native = false;
 		let result = self.wasm.with_instance(
 			runtime_code,
 			ext,
+			heap_alloc_strategy,
 			|_, mut instance, onchain_version, mut ext| {
 				let onchain_version =
 					onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?;
@@ -496,11 +672,7 @@ impl<D: NativeExecutionDispatch + 'static> CodeExecutor for NativeElseWasmExecut
 
 impl<D: NativeExecutionDispatch> Clone for NativeElseWasmExecutor<D> {
 	fn clone(&self) -> Self {
-		NativeElseWasmExecutor {
-			_dummy: Default::default(),
-			native_version: D::native_version(),
-			wasm: self.wasm.clone(),
-		}
+		NativeElseWasmExecutor { native_version: D::native_version(), wasm: self.wasm.clone() }
 	}
 }
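For downstream users the new `new_with_wasm_executor` constructor pairs naturally with the builder above. A hypothetical example, where `MyDispatch` stands in for any `NativeExecutionDispatch` implementation and is not part of this patch:

	// Illustrative sketch only; the host-function generic is inferred from the dispatch type.
	let executor = NativeElseWasmExecutor::<MyDispatch>::new_with_wasm_executor(
		WasmExecutor::builder(WasmExecutionMethod::Interpreted)
			.with_offchain_heap_alloc_strategy(HeapAllocStrategy::Dynamic { maximum_pages: None })
			.build(),
	);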
 
diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs
index 4869fbae2d7db..254380dbb3693 100644
--- a/client/executor/src/wasm_runtime.rs
+++ b/client/executor/src/wasm_runtime.rs
@@ -27,7 +27,7 @@ use lru::LruCache;
 use parking_lot::Mutex;
 use sc_executor_common::{
 	runtime_blob::RuntimeBlob,
-	wasm_runtime::{WasmInstance, WasmModule},
+	wasm_runtime::{HeapAllocStrategy, WasmInstance, WasmModule},
 };
 use sp_core::traits::{Externalities, FetchRuntimeCode, RuntimeCode};
 use sp_version::RuntimeVersion;
@@ -64,8 +64,8 @@ struct VersionedRuntimeId {
 	code_hash: Vec<u8>,
 	/// Wasm runtime type.
 	wasm_method: WasmExecutionMethod,
-	/// The number of WebAssembly heap pages this instance was created with.
-	heap_pages: u64,
+	/// The heap allocation strategy this runtime was created with.
+	heap_alloc_strategy: HeapAllocStrategy,
 }
 
 /// A Wasm runtime object along with its cached runtime version.
@@ -197,10 +197,12 @@ impl RuntimeCache {
 	///
 	/// `runtime_code` - The runtime wasm code used setup the runtime.
 	///
-	/// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution.
+	/// `ext` - The externalities to access the state.
 	///
 	/// `wasm_method` - Type of WASM backend to use.
 	///
+	/// `heap_alloc_strategy` - The heap allocation strategy to use.
+	///
 	/// `allow_missing_func_imports` - Ignore missing function imports.
 	///
 	/// `f` - Function to execute.
@@ -219,7 +221,7 @@ impl RuntimeCache {
 		runtime_code: &'c RuntimeCode<'c>,
 		ext: &mut dyn Externalities,
 		wasm_method: WasmExecutionMethod,
-		default_heap_pages: u64,
+		heap_alloc_strategy: HeapAllocStrategy,
 		allow_missing_func_imports: bool,
 		f: F,
 	) -> Result<Result<R, Error>, Error>
@@ -233,10 +235,9 @@ impl RuntimeCache {
 		) -> Result<R, Error>,
 	{
 		let code_hash = &runtime_code.hash;
-		let heap_pages = runtime_code.heap_pages.unwrap_or(default_heap_pages);
 
 		let versioned_runtime_id =
-			VersionedRuntimeId { code_hash: code_hash.clone(), heap_pages, wasm_method };
+			VersionedRuntimeId { code_hash: code_hash.clone(), heap_alloc_strategy, wasm_method };
 
 		let mut runtimes = self.runtimes.lock(); // this must be released prior to calling f
 		let versioned_runtime = if let Some(versioned_runtime) = runtimes.get(&versioned_runtime_id)
@@ -251,7 +252,7 @@ impl RuntimeCache {
 				&code,
 				ext,
 				wasm_method,
-				heap_pages,
+				heap_alloc_strategy,
 				allow_missing_func_imports,
 				self.max_runtime_instances,
 				self.cache_path.as_deref(),
@@ -289,7 +290,7 @@ impl RuntimeCache {
 /// Create a wasm runtime with the given `code`.
 pub fn create_wasm_runtime_with_code<H>(
 	wasm_method: WasmExecutionMethod,
-	heap_pages: u64,
+	heap_alloc_strategy: HeapAllocStrategy,
 	blob: RuntimeBlob,
 	allow_missing_func_imports: bool,
 	cache_path: Option<&Path>,
@@ -307,7 +308,7 @@ where
 
 			sc_executor_wasmi::create_runtime(
 				blob,
-				heap_pages,
+				heap_alloc_strategy,
 				H::host_functions(),
 				allow_missing_func_imports,
 			)
@@ -320,12 +321,11 @@ where
 					allow_missing_func_imports,
 					cache_path: cache_path.map(ToOwned::to_owned),
 					semantics: sc_executor_wasmtime::Semantics {
-						extra_heap_pages: heap_pages,
+						heap_alloc_strategy,
 						instantiation_strategy,
 						deterministic_stack_limit: None,
 						canonicalize_nans: false,
 						parallel_compilation: true,
-						max_memory_size: None,
 					},
 				},
 			)
@@ -393,7 +393,7 @@ fn create_versioned_wasm_runtime<H>(
 	code: &[u8],
 	ext: &mut dyn Externalities,
 	wasm_method: WasmExecutionMethod,
-	heap_pages: u64,
+	heap_alloc_strategy: HeapAllocStrategy,
 	allow_missing_func_imports: bool,
 	max_instances: usize,
 	cache_path: Option<&Path>,
@@ -408,11 +408,11 @@ where
 	// Use the runtime blob to scan if there is any metadata embedded into the wasm binary
 	// pertaining to runtime version. We do it before consuming the runtime blob for creating the
 	// runtime.
-	let mut version: Option<_> = read_embedded_version(&blob)?;
+	let mut version = read_embedded_version(&blob)?;
 
 	let runtime = create_wasm_runtime_with_code::<H>(
 		wasm_method,
-		heap_pages,
+		heap_alloc_strategy,
 		blob,
 		allow_missing_func_imports,
 		cache_path,
diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml
index 4235440023c89..ded44f4ca77a9 100644
--- a/client/executor/wasmi/Cargo.toml
+++ b/client/executor/wasmi/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 log = "0.4.17"
-wasmi = "0.13"
+wasmi = { version = "0.13.2", features = [ "virtual_memory" ] }
 sc-allocator = { version = "4.1.0-dev", path = "../../allocator" }
 sc-executor-common = { version = "0.10.0-dev", path = "../common" }
 sp-runtime-interface = { version = "7.0.0", path = "../../../primitives/runtime-interface" }
diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs
index 78b32a027eaa9..c757ff8afe43d 100644
--- a/client/executor/wasmi/src/lib.rs
+++ b/client/executor/wasmi/src/lib.rs
@@ -20,23 +20,58 @@
 
 use std::{cell::RefCell, str, sync::Arc};
 
-use log::{debug, error, trace};
+use log::{error, trace};
 use wasmi::{
 	memory_units::Pages,
-	FuncInstance, ImportsBuilder, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef,
+	FuncInstance, ImportsBuilder, MemoryRef, Module, ModuleInstance, ModuleRef,
 	RuntimeValue::{self, I32, I64},
 	TableRef,
 };
 
-use sc_allocator::AllocationStats;
+use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator};
 use sc_executor_common::{
 	error::{Error, MessageWithBacktrace, WasmError},
 	runtime_blob::{DataSegmentsSnapshot, RuntimeBlob},
-	wasm_runtime::{InvokeMethod, WasmInstance, WasmModule},
+	wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmInstance, WasmModule},
 };
 use sp_runtime_interface::unpack_ptr_and_len;
 use sp_wasm_interface::{Function, FunctionContext, Pointer, Result as WResult, WordSize};
 
+/// Wrapper around [`MemoryRef`] that implements [`sc_allocator::Memory`].
+struct MemoryWrapper<'a>(&'a MemoryRef);
+
+impl sc_allocator::Memory for MemoryWrapper<'_> {
+	fn with_access_mut<R>(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R {
+		self.0.with_direct_access_mut(run)
+	}
+
+	fn with_access<R>(&self, run: impl FnOnce(&[u8]) -> R) -> R {
+		self.0.with_direct_access(run)
+	}
+
+	fn pages(&self) -> u32 {
+		self.0.current_size().0 as _
+	}
+
+	fn max_pages(&self) -> Option<u32> {
+		self.0.maximum().map(|p| p.0 as _)
+	}
+
+	fn grow(&mut self, additional: u32) -> Result<(), ()> {
+		self.0
+			.grow(Pages(additional as _))
+			.map_err(|e| {
+				log::error!(
+					target: "wasm-executor",
+					"Failed to grow memory by {} pages: {}",
+					additional,
+					e,
+				)
+			})
+			.map(drop)
+	}
+}
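Both executors now expose their linear memory to the allocator only through the `sc_allocator::Memory` trait, which is what lets `FreeingBumpHeapAllocator` grow the memory on demand instead of relying on pre-allocated heap pages. A rough sketch of that interaction, not the actual allocator code in `client/allocator/src/freeing_bump.rs`:

	// Rough sketch, assuming only the trait methods implemented by the wrappers in this patch.
	const PAGE_SIZE: u64 = 65536;

	fn ensure_capacity<M: sc_allocator::Memory>(mem: &mut M, required_bytes: u64) -> Result<(), ()> {
		let available = mem.pages() as u64 * PAGE_SIZE;
		if required_bytes <= available {
			return Ok(())
		}
		// Round the missing bytes up to whole wasm pages.
		let missing_pages = ((required_bytes - available + PAGE_SIZE - 1) / PAGE_SIZE) as u32;
		// Respect the maximum imposed by the heap allocation strategy, if any.
		if mem.max_pages().map_or(false, |max| mem.pages() + missing_pages > max) {
			return Err(())
		}
		mem.grow(missing_pages)
	}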
+
 struct FunctionExecutor {
 	heap: RefCell<sc_allocator::FreeingBumpHeapAllocator>,
 	memory: MemoryRef,
@@ -55,7 +90,7 @@ impl FunctionExecutor {
 		missing_functions: Arc<Vec<String>>,
 	) -> Result<Self, Error> {
 		Ok(FunctionExecutor {
-			heap: RefCell::new(sc_allocator::FreeingBumpHeapAllocator::new(heap_base)),
+			heap: RefCell::new(FreeingBumpHeapAllocator::new(heap_base)),
 			memory: m,
 			host_functions,
 			allow_missing_func_imports,
@@ -75,15 +110,17 @@ impl FunctionContext for FunctionExecutor {
 	}
 
 	fn allocate_memory(&mut self, size: WordSize) -> WResult<Pointer<u8>> {
-		let heap = &mut self.heap.borrow_mut();
-		self.memory
-			.with_direct_access_mut(|mem| heap.allocate(mem, size).map_err(|e| e.to_string()))
+		self.heap
+			.borrow_mut()
+			.allocate(&mut MemoryWrapper(&self.memory), size)
+			.map_err(|e| e.to_string())
 	}
 
 	fn deallocate_memory(&mut self, ptr: Pointer<u8>) -> WResult<()> {
-		let heap = &mut self.heap.borrow_mut();
-		self.memory
-			.with_direct_access_mut(|mem| heap.deallocate(mem, ptr).map_err(|e| e.to_string()))
+		self.heap
+			.borrow_mut()
+			.deallocate(&mut MemoryWrapper(&self.memory), ptr)
+			.map_err(|e| e.to_string())
 	}
 
 	fn register_panic_error_message(&mut self, message: &str) {
@@ -101,26 +138,17 @@ struct Resolver<'a> {
 	allow_missing_func_imports: bool,
 	/// All the names of functions for that we did not provide a host function.
 	missing_functions: RefCell<Vec<String>>,
-	/// Will be used as initial and maximum size of the imported memory.
-	heap_pages: usize,
-	/// By default, runtimes should import memory and this is `Some(_)` after
-	/// resolving. However, to be backwards compatible, we also support memory
-	/// exported by the WASM blob (this will be `None` after resolving).
-	import_memory: RefCell<Option<MemoryRef>>,
 }
 
 impl<'a> Resolver<'a> {
 	fn new(
 		host_functions: &'a [&'static dyn Function],
 		allow_missing_func_imports: bool,
-		heap_pages: usize,
 	) -> Resolver<'a> {
 		Resolver {
 			host_functions,
 			allow_missing_func_imports,
 			missing_functions: RefCell::new(Vec::new()),
-			heap_pages,
-			import_memory: Default::default(),
 		}
 	}
 }
@@ -148,7 +176,11 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> {
 		}
 
 		if self.allow_missing_func_imports {
-			trace!(target: "wasm-executor", "Could not find function `{}`, a stub will be provided instead.", name);
+			trace!(
+				target: "wasm-executor",
+				"Could not find function `{}`, a stub will be provided instead.",
+				name,
+			);
 			let id = self.missing_functions.borrow().len() + self.host_functions.len();
 			self.missing_functions.borrow_mut().push(name.to_string());
 
@@ -160,44 +192,12 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> {
 
 	fn resolve_memory(
 		&self,
-		field_name: &str,
-		memory_type: &wasmi::MemoryDescriptor,
+		_: &str,
+		_: &wasmi::MemoryDescriptor,
 	) -> Result<MemoryRef, wasmi::Error> {
-		if field_name == "memory" {
-			match &mut *self.import_memory.borrow_mut() {
-				Some(_) =>
-					Err(wasmi::Error::Instantiation("Memory can not be imported twice!".into())),
-				memory_ref @ None => {
-					if memory_type
-						.maximum()
-						.map(|m| m.saturating_sub(memory_type.initial()))
-						.map(|m| self.heap_pages > m as usize)
-						.unwrap_or(false)
-					{
-						Err(wasmi::Error::Instantiation(format!(
-							"Heap pages ({}) is greater than imported memory maximum ({}).",
-							self.heap_pages,
-							memory_type
-								.maximum()
-								.map(|m| m.saturating_sub(memory_type.initial()))
-								.expect("Maximum is set, checked above; qed"),
-						)))
-					} else {
-						let memory = MemoryInstance::alloc(
-							Pages(memory_type.initial() as usize + self.heap_pages),
-							Some(Pages(memory_type.initial() as usize + self.heap_pages)),
-						)?;
-						*memory_ref = Some(memory.clone());
-						Ok(memory)
-					}
-				},
-			}
-		} else {
-			Err(wasmi::Error::Instantiation(format!(
-				"Unknown memory reference with name: {}",
-				field_name
-			)))
-		}
+		Err(wasmi::Error::Instantiation(
+			"Internal error, wasmi expects that the wasm blob exports memory.".into(),
+		))
 	}
 }
 
@@ -358,12 +358,11 @@ fn call_in_wasm_module(
 
 /// Prepare module instance
 fn instantiate_module(
-	heap_pages: usize,
 	module: &Module,
 	host_functions: &[&'static dyn Function],
 	allow_missing_func_imports: bool,
 ) -> Result<(ModuleRef, Vec<String>, MemoryRef), Error> {
-	let resolver = Resolver::new(host_functions, allow_missing_func_imports, heap_pages);
+	let resolver = Resolver::new(host_functions, allow_missing_func_imports);
 	// start module instantiation. Don't run 'start' function yet.
 	let intermediate_instance =
 		ModuleInstance::new(module, &ImportsBuilder::new().with_resolver("env", &resolver))?;
@@ -371,22 +370,10 @@ fn instantiate_module(
 	// Verify that the module has the heap base global variable.
 	let _ = get_heap_base(intermediate_instance.not_started_instance())?;
 
-	// Get the memory reference. Runtimes should import memory, but to be backwards
-	// compatible we also support exported memory.
-	let memory = match resolver.import_memory.into_inner() {
-		Some(memory) => memory,
-		None => {
-			debug!(
-				target: "wasm-executor",
-				"WASM blob does not imports memory, falling back to exported memory",
-			);
-
-			let memory = get_mem_instance(intermediate_instance.not_started_instance())?;
-			memory.grow(Pages(heap_pages)).map_err(|_| Error::Runtime)?;
-
-			memory
-		},
-	};
+	// The `module` should export the memory with the correct properties (min, max).
+	//
+	// This is ensured by modifying the `RuntimeBlob` before initializing the `Module`.
+	let memory = get_mem_instance(intermediate_instance.not_started_instance())?;
 
 	if intermediate_instance.has_start() {
 		// Runtime is not allowed to have the `start` function.
@@ -451,8 +438,6 @@ pub struct WasmiRuntime {
 	/// Enable stub generation for functions that are not available in `host_functions`.
 	/// These stubs will error when the wasm blob tries to call them.
 	allow_missing_func_imports: bool,
-	/// Numer of heap pages this runtime uses.
-	heap_pages: u64,
 
 	global_vals_snapshot: GlobalValsSnapshot,
 	data_segments_snapshot: DataSegmentsSnapshot,
@@ -461,13 +446,9 @@ pub struct WasmiRuntime {
 impl WasmModule for WasmiRuntime {
 	fn new_instance(&self) -> Result<Box<dyn WasmInstance>, Error> {
 		// Instantiate this module.
-		let (instance, missing_functions, memory) = instantiate_module(
-			self.heap_pages as usize,
-			&self.module,
-			&self.host_functions,
-			self.allow_missing_func_imports,
-		)
-		.map_err(|e| WasmError::Instantiation(e.to_string()))?;
+		let (instance, missing_functions, memory) =
+			instantiate_module(&self.module, &self.host_functions, self.allow_missing_func_imports)
+				.map_err(|e| WasmError::Instantiation(e.to_string()))?;
 
 		Ok(Box::new(WasmiInstance {
 			instance,
@@ -477,6 +458,7 @@ impl WasmModule for WasmiRuntime {
 			host_functions: self.host_functions.clone(),
 			allow_missing_func_imports: self.allow_missing_func_imports,
 			missing_functions: Arc::new(missing_functions),
+			memory_zeroed: true,
 		}))
 	}
 }
@@ -484,25 +466,26 @@ impl WasmModule for WasmiRuntime {
 /// Create a new `WasmiRuntime` given the code. This function loads the module and
 /// stores it in the instance.
 pub fn create_runtime(
-	blob: RuntimeBlob,
-	heap_pages: u64,
+	mut blob: RuntimeBlob,
+	heap_alloc_strategy: HeapAllocStrategy,
 	host_functions: Vec<&'static dyn Function>,
 	allow_missing_func_imports: bool,
 ) -> Result<WasmiRuntime, WasmError> {
 	let data_segments_snapshot =
 		DataSegmentsSnapshot::take(&blob).map_err(|e| WasmError::Other(e.to_string()))?;
 
+	// Make sure we only have exported memory to simplify the code of the wasmi executor.
+	blob.convert_memory_import_into_export()?;
+	// Ensure that the memory is set up according to the heap allocation strategy.
+	blob.setup_memory_according_to_heap_alloc_strategy(heap_alloc_strategy)?;
+
 	let module =
 		Module::from_parity_wasm_module(blob.into_inner()).map_err(|_| WasmError::InvalidModule)?;
 
 	let global_vals_snapshot = {
-		let (instance, _, _) = instantiate_module(
-			heap_pages as usize,
-			&module,
-			&host_functions,
-			allow_missing_func_imports,
-		)
-		.map_err(|e| WasmError::Instantiation(e.to_string()))?;
+		let (instance, _, _) =
+			instantiate_module(&module, &host_functions, allow_missing_func_imports)
+				.map_err(|e| WasmError::Instantiation(e.to_string()))?;
 		GlobalValsSnapshot::take(&instance)
 	};
 
@@ -512,7 +495,6 @@ pub fn create_runtime(
 		global_vals_snapshot,
 		host_functions: Arc::new(host_functions),
 		allow_missing_func_imports,
-		heap_pages,
 	})
 }
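The `setup_memory_according_to_heap_alloc_strategy` helper used above is added to `RuntimeBlob` in `runtime_blob.rs` (outside this excerpt). Conceptually it rewrites the blob's memory limits before instantiation; roughly, and ignoring the blob's own declared maximum for brevity:

	// Conceptual sketch only, not the actual implementation in runtime_blob.rs.
	fn memory_limits(declared_initial: u32, strategy: HeapAllocStrategy) -> (u32, Option<u32>) {
		match strategy {
			// A static strategy pre-allocates everything: initial == maximum, so the
			// instance can never grow (see the wasmtime tests further down).
			HeapAllocStrategy::Static { extra_pages } =>
				(declared_initial + extra_pages, Some(declared_initial + extra_pages)),
			// A dynamic strategy keeps the declared initial size and only caps growth.
			HeapAllocStrategy::Dynamic { maximum_pages } => (declared_initial, maximum_pages),
		}
	}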
 
@@ -522,6 +504,8 @@ pub struct WasmiInstance {
 	instance: ModuleRef,
 	/// The memory instance of used by the wasm module.
 	memory: MemoryRef,
+	/// Is the memory zeroed?
+	memory_zeroed: bool,
 	/// The snapshot of global variable values just after instantiation.
 	global_vals_snapshot: GlobalValsSnapshot,
 	/// The snapshot of data segments.
@@ -549,14 +533,16 @@ impl WasmiInstance {
 		// We reuse a single wasm instance for multiple calls and a previous call (if any)
 		// altered the state. Therefore, we need to restore the instance to original state.
 
-		// First, zero initialize the linear memory.
-		self.memory.erase().map_err(|e| {
-			// Snapshot restoration failed. This is pretty unexpected since this can happen
-			// if some invariant is broken or if the system is under extreme memory pressure
-			// (so erasing fails).
-			error!(target: "wasm-executor", "snapshot restoration failed: {}", e);
-			WasmError::ErasingFailed(e.to_string())
-		})?;
+		if !self.memory_zeroed {
+			// First, zero initialize the linear memory.
+			self.memory.erase().map_err(|e| {
+				// Snapshot restoration failed. This is pretty unexpected since this can happen
+				// if some invariant is broken or if the system is under extreme memory pressure
+				// (so erasing fails).
+				error!(target: "wasm-executor", "snapshot restoration failed: {}", e);
+				WasmError::ErasingFailed(e.to_string())
+			})?;
+		}
 
 		// Second, reapply data segments into the linear memory.
 		self.data_segments_snapshot
@@ -565,7 +551,7 @@ impl WasmiInstance {
 		// Third, restore the global variables to their initial values.
 		self.global_vals_snapshot.apply(&self.instance)?;
 
-		call_in_wasm_module(
+		let res = call_in_wasm_module(
 			&self.instance,
 			&self.memory,
 			method,
@@ -574,7 +560,12 @@ impl WasmiInstance {
 			self.allow_missing_func_imports,
 			self.missing_functions.clone(),
 			allocation_stats,
-		)
+		);
+
+		// Eagerly erase the memory; if erasing fails, it will be retried before the next call.
+		self.memory_zeroed = self.memory.erase().is_ok();
+
+		res
 	}
 }
 
@@ -601,4 +592,8 @@ impl WasmInstance for WasmiInstance {
 			None => Ok(None),
 		}
 	}
+
+	fn linear_memory_base_ptr(&self) -> Option<*const u8> {
+		Some(self.memory.direct_access().as_ref().as_ptr())
+	}
 }
diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml
index ca7daa970207d..2e892a98a126b 100644
--- a/client/executor/wasmtime/Cargo.toml
+++ b/client/executor/wasmtime/Cargo.toml
@@ -13,9 +13,9 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
+log = "0.4.17"
 cfg-if = "1.0"
 libc = "0.2.121"
-log = "0.4.17"
 
 # When bumping wasmtime do not forget to also bump rustix
 # to exactly the same version as used by wasmtime!
diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs
index ead248385c9fe..9bd3ca3dade5e 100644
--- a/client/executor/wasmtime/src/host.rs
+++ b/client/executor/wasmtime/src/host.rs
@@ -24,20 +24,25 @@ use wasmtime::Caller;
 use sc_allocator::{AllocationStats, FreeingBumpHeapAllocator};
 use sp_wasm_interface::{Pointer, WordSize};
 
-use crate::{runtime::StoreData, util};
+use crate::{instance_wrapper::MemoryWrapper, runtime::StoreData, util};
 
 /// The state required to construct a HostContext context. The context only lasts for one host
 /// call, whereas the state is maintained for the duration of a Wasm runtime call, which may make
 /// many different host calls that must share state.
 pub struct HostState {
-	allocator: FreeingBumpHeapAllocator,
+	/// The allocator instance to keep track of allocated memory.
+	///
+	/// This is stored as an `Option` as we need to temporarily set it to `None` when we are
+	/// allocating/deallocating memory. The problem is that we can only mutably access `caller`
+	/// once.
+	allocator: Option<FreeingBumpHeapAllocator>,
 	panic_message: Option<String>,
 }
 
 impl HostState {
 	/// Constructs a new `HostState`.
 	pub fn new(allocator: FreeingBumpHeapAllocator) -> Self {
-		HostState { allocator, panic_message: None }
+		HostState { allocator: Some(allocator), panic_message: None }
 	}
 
 	/// Takes the error message out of the host state, leaving a `None` in its place.
@@ -46,7 +51,9 @@ impl HostState {
 	}
 
 	pub(crate) fn allocation_stats(&self) -> AllocationStats {
-		self.allocator.stats()
+		self.allocator.as_ref()
+			.expect("Allocator is always set and only unavailable when doing an allocation/deallocation; qed")
+			.stats()
 	}
 }
 
@@ -81,22 +88,38 @@ impl<'a> sp_wasm_interface::FunctionContext for HostContext<'a> {
 
 	fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result<Pointer<u8>> {
 		let memory = self.caller.data().memory();
-		let (memory, data) = memory.data_and_store_mut(&mut self.caller);
-		data.host_state_mut()
-			.expect("host state is not empty when calling a function in wasm; qed")
+		let mut allocator = self
+			.host_state_mut()
 			.allocator
-			.allocate(memory, size)
-			.map_err(|e| e.to_string())
+			.take()
+			.expect("allocator is not empty when calling a function in wasm; qed");
+
+		// We cannot return early on error, as we need to store the allocator back.
+		let res = allocator
+			.allocate(&mut MemoryWrapper(&memory, &mut self.caller), size)
+			.map_err(|e| e.to_string());
+
+		self.host_state_mut().allocator = Some(allocator);
+
+		res
 	}
 
 	fn deallocate_memory(&mut self, ptr: Pointer<u8>) -> sp_wasm_interface::Result<()> {
 		let memory = self.caller.data().memory();
-		let (memory, data) = memory.data_and_store_mut(&mut self.caller);
-		data.host_state_mut()
-			.expect("host state is not empty when calling a function in wasm; qed")
+		let mut allocator = self
+			.host_state_mut()
 			.allocator
-			.deallocate(memory, ptr)
-			.map_err(|e| e.to_string())
+			.take()
+			.expect("allocator is not empty when calling a function in wasm; qed");
+
+		// We cannot return early on error, as we need to store the allocator back.
+		let res = allocator
+			.deallocate(&mut MemoryWrapper(&memory, &mut self.caller), ptr)
+			.map_err(|e| e.to_string());
+
+		self.host_state_mut().allocator = Some(allocator);
+
+		res
 	}
 
 	fn register_panic_error_message(&mut self, message: &str) {
diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs
index 2fb27180fcc76..6d319cce509e5 100644
--- a/client/executor/wasmtime/src/instance_wrapper.rs
+++ b/client/executor/wasmtime/src/instance_wrapper.rs
@@ -26,8 +26,7 @@ use sc_executor_common::{
 };
 use sp_wasm_interface::{Pointer, Value, WordSize};
 use wasmtime::{
-	AsContext, AsContextMut, Engine, Extern, Func, Global, Instance, InstancePre, Memory, Table,
-	Val,
+	AsContext, AsContextMut, Engine, Extern, Instance, InstancePre, Memory, Table, Val,
 };
 
 /// Invoked entrypoint format.
@@ -113,66 +112,58 @@ impl EntryPoint {
 	}
 }
 
-/// Wrap the given WebAssembly Instance of a wasm module with Substrate-runtime.
-///
-/// This struct is a handy wrapper around a wasmtime `Instance` that provides substrate specific
-/// routines.
-pub struct InstanceWrapper {
-	instance: Instance,
-	memory: Memory,
-	store: Store,
-}
+/// Wrapper around [`Memory`] that implements [`sc_allocator::Memory`].
+pub(crate) struct MemoryWrapper<'a, C>(pub &'a wasmtime::Memory, pub &'a mut C);
 
-fn extern_memory(extern_: &Extern) -> Option<&Memory> {
-	match extern_ {
-		Extern::Memory(mem) => Some(mem),
-		_ => None,
+impl<C: AsContextMut> sc_allocator::Memory for MemoryWrapper<'_, C> {
+	fn with_access<R>(&self, run: impl FnOnce(&[u8]) -> R) -> R {
+		run(self.0.data(&self.1))
 	}
-}
 
-fn extern_global(extern_: &Extern) -> Option<&Global> {
-	match extern_ {
-		Extern::Global(glob) => Some(glob),
-		_ => None,
+	fn with_access_mut<R>(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R {
+		run(self.0.data_mut(&mut self.1))
 	}
-}
 
-fn extern_table(extern_: &Extern) -> Option<&Table> {
-	match extern_ {
-		Extern::Table(table) => Some(table),
-		_ => None,
+	fn grow(&mut self, additional: u32) -> std::result::Result<(), ()> {
+		self.0
+			.grow(&mut self.1, additional as u64)
+			.map_err(|e| {
+				log::error!(
+					target: "wasm-executor",
+					"Failed to grow memory by {} pages: {}",
+					additional,
+					e,
+				)
+			})
+			.map(drop)
 	}
-}
 
-fn extern_func(extern_: &Extern) -> Option<&Func> {
-	match extern_ {
-		Extern::Func(func) => Some(func),
-		_ => None,
+	fn pages(&self) -> u32 {
+		self.0.size(&self.1) as u32
 	}
-}
 
-pub(crate) fn create_store(engine: &wasmtime::Engine, max_memory_size: Option<usize>) -> Store {
-	let limits = if let Some(max_memory_size) = max_memory_size {
-		wasmtime::StoreLimitsBuilder::new().memory_size(max_memory_size).build()
-	} else {
-		Default::default()
-	};
-
-	let mut store =
-		Store::new(engine, StoreData { limits, host_state: None, memory: None, table: None });
-	if max_memory_size.is_some() {
-		store.limiter(|s| &mut s.limits);
+	fn max_pages(&self) -> Option<u32> {
+		self.0.ty(&self.1).maximum().map(|p| p as _)
 	}
-	store
+}
+
+/// Wrap the given WebAssembly Instance of a wasm module with Substrate-runtime.
+///
+/// This struct is a handy wrapper around a wasmtime `Instance` that provides substrate specific
+/// routines.
+pub struct InstanceWrapper {
+	instance: Instance,
+	/// The memory instance of the `instance`.
+	///
+	/// It is important to make sure that we don't make any copies of this, to make it easier to
+	/// reason about how the memory is accessed.
+	memory: Memory,
+	store: Store,
 }
 
 impl InstanceWrapper {
-	pub(crate) fn new(
-		engine: &Engine,
-		instance_pre: &InstancePre<StoreData>,
-		max_memory_size: Option<usize>,
-	) -> Result<Self> {
-		let mut store = create_store(engine, max_memory_size);
+	pub(crate) fn new(engine: &Engine, instance_pre: &InstancePre<StoreData>) -> Result<Self> {
+		let mut store = Store::new(engine, Default::default());
 		let instance = instance_pre.instantiate(&mut store).map_err(|error| {
 			WasmError::Other(format!(
 				"failed to instantiate a new WASM module instance: {:#}",
@@ -201,9 +192,10 @@ impl InstanceWrapper {
 					self.instance.get_export(&mut self.store, method).ok_or_else(|| {
 						Error::from(format!("Exported method {} is not found", method))
 					})?;
-				let func = extern_func(&export)
+				let func = export
+					.into_func()
 					.ok_or_else(|| Error::from(format!("Export {} is not a function", method)))?;
-				EntryPoint::direct(*func, &self.store).map_err(|_| {
+				EntryPoint::direct(func, &self.store).map_err(|_| {
 					Error::from(format!("Exported function '{}' has invalid signature.", method))
 				})?
 			},
@@ -259,7 +251,8 @@ impl InstanceWrapper {
 			.get_export(&mut self.store, "__heap_base")
 			.ok_or_else(|| Error::from("__heap_base is not found"))?;
 
-		let heap_base_global = extern_global(&heap_base_export)
+		let heap_base_global = heap_base_export
+			.into_global()
 			.ok_or_else(|| Error::from("__heap_base is not a global"))?;
 
 		let heap_base = heap_base_global
@@ -277,7 +270,7 @@ impl InstanceWrapper {
 			None => return Ok(None),
 		};
 
-		let global = extern_global(&global).ok_or_else(|| format!("`{}` is not a global", name))?;
+		let global = global.into_global().ok_or_else(|| format!("`{}` is not a global", name))?;
 
 		match global.get(&mut self.store) {
 			Val::I32(val) => Ok(Some(Value::I32(val))),
@@ -300,7 +293,8 @@ fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result<Memo
 		.get_export(ctx, "memory")
 		.ok_or_else(|| Error::from("memory is not exported under `memory` name"))?;
 
-	let memory = *extern_memory(&memory_export)
+	let memory = memory_export
+		.into_memory()
 		.ok_or_else(|| Error::from("the `memory` export should have memory type"))?;
 
 	Ok(memory)
@@ -311,8 +305,8 @@ fn get_table(instance: &Instance, ctx: &mut Store) -> Option<Table> {
 	instance
 		.get_export(ctx, "__indirect_function_table")
 		.as_ref()
-		.and_then(extern_table)
 		.cloned()
+		.and_then(Extern::into_table)
 }
 
 /// Functions related to memory.
@@ -403,7 +397,7 @@ fn decommit_works() {
 	let module = wasmtime::Module::new(&engine, code).unwrap();
 	let linker = wasmtime::Linker::new(&engine);
 	let instance_pre = linker.instantiate_pre(&module).unwrap();
-	let mut wrapper = InstanceWrapper::new(&engine, &instance_pre, None).unwrap();
+	let mut wrapper = InstanceWrapper::new(&engine, &instance_pre).unwrap();
 	unsafe { *wrapper.memory.data_ptr(&wrapper.store) = 42 };
 	assert_eq!(unsafe { *wrapper.memory.data_ptr(&wrapper.store) }, 42);
 	wrapper.decommit();
diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs
index ce3f889fc9a75..de7a2ea0e73df 100644
--- a/client/executor/wasmtime/src/runtime.rs
+++ b/client/executor/wasmtime/src/runtime.rs
@@ -20,7 +20,7 @@
 
 use crate::{
 	host::HostState,
-	instance_wrapper::{EntryPoint, InstanceWrapper},
+	instance_wrapper::{EntryPoint, InstanceWrapper, MemoryWrapper},
 	util::{self, replace_strategy_if_broken},
 };
 
@@ -31,7 +31,7 @@ use sc_executor_common::{
 		self, DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob,
 	},
 	util::checked_range,
-	wasm_runtime::{InvokeMethod, WasmInstance, WasmModule},
+	wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmInstance, WasmModule},
 };
 use sp_runtime_interface::unpack_ptr_and_len;
 use sp_wasm_interface::{HostFunctions, Pointer, Value, WordSize};
@@ -42,12 +42,10 @@ use std::{
 		Arc,
 	},
 };
-use wasmtime::{AsContext, Engine, Memory, StoreLimits, Table};
+use wasmtime::{AsContext, Engine, Memory, Table};
 
+#[derive(Default)]
 pub(crate) struct StoreData {
-	/// The limits we apply to the store. We need to store it here to return a reference to this
-	/// object when we have the limits enabled.
-	pub(crate) limits: StoreLimits,
 	/// This will only be set when we call into the runtime.
 	pub(crate) host_state: Option<HostState>,
 	/// This will be always set once the store is initialized.
@@ -83,12 +81,11 @@ enum Strategy {
 struct InstanceCreator {
 	engine: wasmtime::Engine,
 	instance_pre: Arc<wasmtime::InstancePre<StoreData>>,
-	max_memory_size: Option<usize>,
 }
 
 impl InstanceCreator {
 	fn instantiate(&mut self) -> Result<InstanceWrapper> {
-		InstanceWrapper::new(&self.engine, &self.instance_pre, self.max_memory_size)
+		InstanceWrapper::new(&self.engine, &self.instance_pre)
 	}
 }
 
@@ -128,18 +125,13 @@ pub struct WasmtimeRuntime {
 	engine: wasmtime::Engine,
 	instance_pre: Arc<wasmtime::InstancePre<StoreData>>,
 	instantiation_strategy: InternalInstantiationStrategy,
-	config: Config,
 }
 
 impl WasmModule for WasmtimeRuntime {
 	fn new_instance(&self) -> Result<Box<dyn WasmInstance>> {
 		let strategy = match self.instantiation_strategy {
 			InternalInstantiationStrategy::LegacyInstanceReuse(ref snapshot_data) => {
-				let mut instance_wrapper = InstanceWrapper::new(
-					&self.engine,
-					&self.instance_pre,
-					self.config.semantics.max_memory_size,
-				)?;
+				let mut instance_wrapper = InstanceWrapper::new(&self.engine, &self.instance_pre)?;
 				let heap_base = instance_wrapper.extract_heap_base()?;
 
 				// This function panics if the instance was created from a runtime blob different
@@ -161,7 +153,6 @@ impl WasmModule for WasmtimeRuntime {
 			InternalInstantiationStrategy::Builtin => Strategy::RecreateInstance(InstanceCreator {
 				engine: self.engine.clone(),
 				instance_pre: self.instance_pre.clone(),
-				max_memory_size: self.config.semantics.max_memory_size,
 			}),
 		};
 
@@ -350,25 +341,22 @@ fn common_config(semantics: &Semantics) -> std::result::Result<wasmtime::Config,
 		InstantiationStrategy::LegacyInstanceReuse => (false, false),
 	};
 
+	const WASM_PAGE_SIZE: u64 = 65536;
+
 	config.memory_init_cow(use_cow);
-	config.memory_guaranteed_dense_image_size(
-		semantics.max_memory_size.map(|max| max as u64).unwrap_or(u64::MAX),
-	);
+	config.memory_guaranteed_dense_image_size(match semantics.heap_alloc_strategy {
+		HeapAllocStrategy::Dynamic { maximum_pages } =>
+			maximum_pages.map(|p| p as u64 * WASM_PAGE_SIZE).unwrap_or(u64::MAX),
+		HeapAllocStrategy::Static { .. } => u64::MAX,
+	});
 
 	if use_pooling {
-		const WASM_PAGE_SIZE: u64 = 65536;
 		const MAX_WASM_PAGES: u64 = 0x10000;
 
-		let memory_pages = if let Some(max_memory_size) = semantics.max_memory_size {
-			let max_memory_size = max_memory_size as u64;
-			let mut pages = max_memory_size / WASM_PAGE_SIZE;
-			if max_memory_size % WASM_PAGE_SIZE != 0 {
-				pages += 1;
-			}
-
-			std::cmp::min(MAX_WASM_PAGES, pages)
-		} else {
-			MAX_WASM_PAGES
+		let memory_pages = match semantics.heap_alloc_strategy {
+			HeapAllocStrategy::Dynamic { maximum_pages } =>
+				maximum_pages.map(|p| p as u64).unwrap_or(MAX_WASM_PAGES),
+			HeapAllocStrategy::Static { .. } => MAX_WASM_PAGES,
 		};
 
 		let mut pooling_config = wasmtime::PoolingAllocationConfig::default();
@@ -514,25 +502,8 @@ pub struct Semantics {
 	/// Configures wasmtime to use multiple threads for compiling.
 	pub parallel_compilation: bool,
 
-	/// The number of extra WASM pages which will be allocated
-	/// on top of what is requested by the WASM blob itself.
-	pub extra_heap_pages: u64,
-
-	/// The total amount of memory in bytes an instance can request.
-	///
-	/// If specified, the runtime will be able to allocate only that much of wasm memory.
-	/// This is the total number and therefore the [`Semantics::extra_heap_pages`] is accounted
-	/// for.
-	///
-	/// That means that the initial number of pages of a linear memory plus the
-	/// [`Semantics::extra_heap_pages`] multiplied by the wasm page size (64KiB) should be less
-	/// than or equal to `max_memory_size`, otherwise the instance won't be created.
-	///
-	/// Moreover, `memory.grow` will fail (return -1) if the sum of sizes of currently mounted
-	/// and additional pages exceeds `max_memory_size`.
-	///
-	/// The default is `None`.
-	pub max_memory_size: Option<usize>,
+	/// The heap allocation strategy to use.
+	pub heap_alloc_strategy: HeapAllocStrategy,
 }
 
 #[derive(Clone)]
@@ -689,12 +660,7 @@ where
 		.instantiate_pre(&module)
 		.map_err(|e| WasmError::Other(format!("cannot preinstantiate module: {:#}", e)))?;
 
-	Ok(WasmtimeRuntime {
-		engine,
-		instance_pre: Arc::new(instance_pre),
-		instantiation_strategy,
-		config,
-	})
+	Ok(WasmtimeRuntime { engine, instance_pre: Arc::new(instance_pre), instantiation_strategy })
 }
 
 fn prepare_blob_for_compilation(
@@ -717,12 +683,7 @@ fn prepare_blob_for_compilation(
 	// now automatically take care of creating the memory for us, and it is also necessary
 	// to enable `wasmtime`'s instance pooling. (Imported memories are ineligible for pooling.)
 	blob.convert_memory_import_into_export()?;
-	blob.add_extra_heap_pages_to_memory_section(
-		semantics
-			.extra_heap_pages
-			.try_into()
-			.map_err(|e| WasmError::Other(format!("invalid `extra_heap_pages`: {}", e)))?,
-	)?;
+	blob.setup_memory_according_to_heap_alloc_strategy(semantics.heap_alloc_strategy)?;
 
 	Ok(blob)
 }
@@ -783,9 +744,8 @@ fn inject_input_data(
 ) -> Result<(Pointer<u8>, WordSize)> {
 	let mut ctx = instance.store_mut();
 	let memory = ctx.data().memory();
-	let memory = memory.data_mut(&mut ctx);
 	let data_len = data.len() as WordSize;
-	let data_ptr = allocator.allocate(memory, data_len)?;
+	let data_ptr = allocator.allocate(&mut MemoryWrapper(&memory, &mut ctx), data_len)?;
 	util::write_memory_from(instance.store_mut(), data_ptr, data)?;
 	Ok((data_ptr, data_len))
 }
diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs
index 57540b35d05cf..7f222b6cf7c05 100644
--- a/client/executor/wasmtime/src/tests.rs
+++ b/client/executor/wasmtime/src/tests.rs
@@ -17,7 +17,11 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use codec::{Decode as _, Encode as _};
-use sc_executor_common::{error::Error, runtime_blob::RuntimeBlob, wasm_runtime::WasmModule};
+use sc_executor_common::{
+	error::Error,
+	runtime_blob::RuntimeBlob,
+	wasm_runtime::{HeapAllocStrategy, WasmModule},
+};
 use sc_runtime_test::wasm_binary_unwrap;
 
 use crate::InstantiationStrategy;
@@ -77,8 +81,7 @@ struct RuntimeBuilder {
 	instantiation_strategy: InstantiationStrategy,
 	canonicalize_nans: bool,
 	deterministic_stack: bool,
-	extra_heap_pages: u64,
-	max_memory_size: Option<usize>,
+	heap_pages: HeapAllocStrategy,
 	precompile_runtime: bool,
 	tmpdir: Option<tempfile::TempDir>,
 }
@@ -90,8 +93,7 @@ impl RuntimeBuilder {
 			instantiation_strategy,
 			canonicalize_nans: false,
 			deterministic_stack: false,
-			extra_heap_pages: 1024,
-			max_memory_size: None,
+			heap_pages: HeapAllocStrategy::Static { extra_pages: 1024 },
 			precompile_runtime: false,
 			tmpdir: None,
 		}
@@ -117,8 +119,8 @@ impl RuntimeBuilder {
 		self
 	}
 
-	fn max_memory_size(mut self, max_memory_size: Option<usize>) -> Self {
-		self.max_memory_size = max_memory_size;
+	fn heap_alloc_strategy(mut self, heap_pages: HeapAllocStrategy) -> Self {
+		self.heap_pages = heap_pages;
 		self
 	}
 
@@ -152,8 +154,7 @@ impl RuntimeBuilder {
 				},
 				canonicalize_nans: self.canonicalize_nans,
 				parallel_compilation: true,
-				extra_heap_pages: self.extra_heap_pages,
-				max_memory_size: self.max_memory_size,
+				heap_alloc_strategy: self.heap_pages,
 			},
 		};
 
@@ -227,7 +228,7 @@ fn deep_call_stack_wat(depth: usize) -> String {
 // We need two limits here since depending on whether the code is compiled in debug
 // or in release mode the maximum call depth is slightly different.
 const CALL_DEPTH_LOWER_LIMIT: usize = 65455;
-const CALL_DEPTH_UPPER_LIMIT: usize = 65503;
+const CALL_DEPTH_UPPER_LIMIT: usize = 65509;
 
 test_wasm_execution!(test_consume_under_1mb_of_stack_does_not_trap);
 fn test_consume_under_1mb_of_stack_does_not_trap(instantiation_strategy: InstantiationStrategy) {
@@ -344,29 +345,25 @@ fn test_max_memory_pages(
 	import_memory: bool,
 	precompile_runtime: bool,
 ) {
-	fn try_instantiate(
-		max_memory_size: Option<usize>,
+	fn call(
+		heap_alloc_strategy: HeapAllocStrategy,
 		wat: String,
 		instantiation_strategy: InstantiationStrategy,
 		precompile_runtime: bool,
 	) -> Result<(), Box<dyn std::error::Error>> {
 		let mut builder = RuntimeBuilder::new(instantiation_strategy)
 			.use_wat(wat)
-			.max_memory_size(max_memory_size)
+			.heap_alloc_strategy(heap_alloc_strategy)
 			.precompile_runtime(precompile_runtime);
 
 		let runtime = builder.build();
-		let mut instance = runtime.new_instance()?;
+		let mut instance = runtime.new_instance().unwrap();
 		let _ = instance.call_export("main", &[])?;
 		Ok(())
 	}
 
-	fn memory(initial: u32, maximum: Option<u32>, import: bool) -> String {
-		let memory = if let Some(maximum) = maximum {
-			format!("(memory $0 {} {})", initial, maximum)
-		} else {
-			format!("(memory $0 {})", initial)
-		};
+	fn memory(initial: u32, maximum: u32, import: bool) -> String {
+		let memory = format!("(memory $0 {} {})", initial, maximum);
 
 		if import {
 			format!("(import \"env\" \"memory\" {})", memory)
@@ -375,152 +372,90 @@ fn test_max_memory_pages(
 		}
 	}
 
-	const WASM_PAGE_SIZE: usize = 65536;
-
-	// check the old behavior if preserved. That is, if no limit is set we allow 4 GiB of memory.
-	try_instantiate(
-		None,
-		format!(
-			r#"
-			(module
-				{}
-				(global (export "__heap_base") i32 (i32.const 0))
-				(func (export "main")
-					(param i32 i32) (result i64)
-					(i64.const 0)
-				)
-			)
-			"#,
-			/*
-				We want to allocate the maximum number of pages supported in wasm for this test.
-				However, due to a bug in wasmtime (I think wasmi is also affected) it is only possible
-				to allocate 65536 - 1 pages.
-
-				Then, during creation of the Substrate Runtime instance, 1024 (heap_pages) pages are
-				mounted.
-
-				Thus 65535 = 64511 + 1024
-			*/
-			memory(64511, None, import_memory)
-		),
-		instantiation_strategy,
-		precompile_runtime,
-	)
-	.unwrap();
-
-	// max is not specified, therefore it's implied to be 65536 pages (4 GiB).
-	//
-	// max_memory_size = (1 (initial) + 1024 (heap_pages)) * WASM_PAGE_SIZE
-	try_instantiate(
-		Some((1 + 1024) * WASM_PAGE_SIZE),
-		format!(
-			r#"
-			(module
-				{}
-				(global (export "__heap_base") i32 (i32.const 0))
-				(func (export "main")
-					(param i32 i32) (result i64)
-					(i64.const 0)
-				)
-			)
-			"#,
-			// 1 initial, max is not specified.
-			memory(1, None, import_memory)
-		),
-		instantiation_strategy,
-		precompile_runtime,
-	)
-	.unwrap();
-
-	// max is specified explicitly to 2048 pages.
-	try_instantiate(
-		Some((1 + 1024) * WASM_PAGE_SIZE),
-		format!(
-			r#"
-			(module
-				{}
-				(global (export "__heap_base") i32 (i32.const 0))
-				(func (export "main")
-					(param i32 i32) (result i64)
-					(i64.const 0)
-				)
-			)
-			"#,
-			// Max is 2048.
-			memory(1, Some(2048), import_memory)
-		),
-		instantiation_strategy,
-		precompile_runtime,
-	)
-	.unwrap();
-
-	// memory grow should work as long as it doesn't exceed 1025 pages in total.
-	try_instantiate(
-		Some((0 + 1024 + 25) * WASM_PAGE_SIZE),
-		format!(
-			r#"
-			(module
-				{}
-				(global (export "__heap_base") i32 (i32.const 0))
-				(func (export "main")
-					(param i32 i32) (result i64)
-
-					;; assert(memory.grow returns != -1)
-					(if
-						(i32.eq
-							(memory.grow
-								(i32.const 25)
+	let assert_grow_ok = |alloc_strategy: HeapAllocStrategy, initial_pages: u32, max_pages: u32| {
+		eprintln!("assert_grow_ok({alloc_strategy:?}, {initial_pages}, {max_pages})");
+
+		call(
+			alloc_strategy,
+			format!(
+				r#"
+					(module
+						{}
+						(global (export "__heap_base") i32 (i32.const 0))
+						(func (export "main")
+							(param i32 i32) (result i64)
+
+							;; assert(memory.grow returns != -1)
+							(if
+								(i32.eq
+									(memory.grow
+										(i32.const 1)
+									)
+									(i32.const -1)
+								)
+								(unreachable)
 							)
-							(i32.const -1)
+
+							(i64.const 0)
 						)
-						(unreachable)
 					)
-
-					(i64.const 0)
-				)
-			)
 			"#,
-			// Zero starting pages.
-			memory(0, None, import_memory)
-		),
-		instantiation_strategy,
-		precompile_runtime,
-	)
-	.unwrap();
+				memory(initial_pages, max_pages, import_memory)
+			),
+			instantiation_strategy,
+			precompile_runtime,
+		)
+		.unwrap()
+	};
 
-	// We start with 1025 pages and try to grow at least one.
-	try_instantiate(
-		Some((1 + 1024) * WASM_PAGE_SIZE),
-		format!(
-			r#"
-			(module
-				{}
-				(global (export "__heap_base") i32 (i32.const 0))
-				(func (export "main")
-					(param i32 i32) (result i64)
-
-					;; assert(memory.grow returns == -1)
-					(if
-						(i32.ne
-							(memory.grow
-								(i32.const 1)
+	let assert_grow_fail =
+		|alloc_strategy: HeapAllocStrategy, initial_pages: u32, max_pages: u32| {
+			eprintln!("assert_grow_fail({alloc_strategy:?}, {initial_pages}, {max_pages})");
+
+			call(
+				alloc_strategy,
+				format!(
+					r#"
+						(module
+							{}
+							(global (export "__heap_base") i32 (i32.const 0))
+							(func (export "main")
+								(param i32 i32) (result i64)
+
+								;; assert(memory.grow returns == -1)
+								(if
+									(i32.ne
+										(memory.grow
+											(i32.const 1)
+										)
+										(i32.const -1)
+									)
+									(unreachable)
+								)
+
+								(i64.const 0)
 							)
-							(i32.const -1)
 						)
-						(unreachable)
-					)
-
-					(i64.const 0)
-				)
+					"#,
+					memory(initial_pages, max_pages, import_memory)
+				),
+				instantiation_strategy,
+				precompile_runtime,
 			)
-			"#,
-			// Initial=1, meaning after heap pages mount the total will be already 1025.
-			memory(1, None, import_memory)
-		),
-		instantiation_strategy,
-		precompile_runtime,
-	)
-	.unwrap();
+			.unwrap()
+		};
+
+	assert_grow_ok(HeapAllocStrategy::Dynamic { maximum_pages: Some(10) }, 1, 10);
+	assert_grow_ok(HeapAllocStrategy::Dynamic { maximum_pages: Some(10) }, 9, 10);
+	assert_grow_fail(HeapAllocStrategy::Dynamic { maximum_pages: Some(10) }, 10, 10);
+
+	assert_grow_ok(HeapAllocStrategy::Dynamic { maximum_pages: None }, 1, 10);
+	assert_grow_ok(HeapAllocStrategy::Dynamic { maximum_pages: None }, 9, 10);
+	assert_grow_ok(HeapAllocStrategy::Dynamic { maximum_pages: None }, 10, 10);
+
+	assert_grow_fail(HeapAllocStrategy::Static { extra_pages: 10 }, 1, 10);
+	assert_grow_fail(HeapAllocStrategy::Static { extra_pages: 10 }, 9, 10);
+	assert_grow_fail(HeapAllocStrategy::Static { extra_pages: 10 }, 10, 10);
 }
 
 // This test takes quite a while to execute in a debug build (over 6 minutes on a TR 3970x)
@@ -538,8 +473,7 @@ fn test_instances_without_reuse_are_not_leaked() {
 				deterministic_stack_limit: None,
 				canonicalize_nans: false,
 				parallel_compilation: true,
-				extra_heap_pages: 2048,
-				max_memory_size: None,
+				heap_alloc_strategy: HeapAllocStrategy::Static { extra_pages: 2048 },
 			},
 		},
 	)
@@ -583,6 +517,10 @@ fn test_rustix_version_matches_with_wasmtime() {
 		.unwrap();
 
 	if wasmtime_rustix.req != our_rustix.req {
-		panic!("our version of rustix ({0}) doesn't match wasmtime's ({1}); bump the version in `sc-executor-wasmtime`'s `Cargo.toml' to '{1}' and try again", our_rustix.req, wasmtime_rustix.req);
+		panic!(
+			"our version of rustix ({0}) doesn't match wasmtime's ({1}); \
+				bump the version in `sc-executor-wasmtime`'s `Cargo.toml' to '{1}' and try again",
+			our_rustix.req, wasmtime_rustix.req,
+		);
 	}
 }
diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs
index fca1a8a83b269..4f72bb0521554 100644
--- a/client/finality-grandpa/src/lib.rs
+++ b/client/finality-grandpa/src/lib.rs
@@ -75,7 +75,7 @@ use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::AppKey;
 use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult};
 use sp_consensus::SelectChain;
-use sp_core::crypto::ByteArray;
+use sp_core::{crypto::ByteArray, traits::CallContext};
 use sp_finality_grandpa::{
 	AuthorityList, AuthoritySignature, SetId, CLIENT_LOG_TARGET as LOG_TARGET,
 };
@@ -479,6 +479,7 @@ where
 				"GrandpaApi_grandpa_authorities",
 				&[],
 				ExecutionStrategy::NativeElseWasm,
+				CallContext::Offchain,
 			)
 			.and_then(|call_result| {
 				Decode::decode(&mut &call_result[..]).map_err(|err| {
diff --git a/client/rpc-spec-v2/src/chain_head/chain_head.rs b/client/rpc-spec-v2/src/chain_head/chain_head.rs
index 4dbdf277e1732..4fa47dc403cc9 100644
--- a/client/rpc-spec-v2/src/chain_head/chain_head.rs
+++ b/client/rpc-spec-v2/src/chain_head/chain_head.rs
@@ -52,7 +52,7 @@ use sp_api::CallApiAt;
 use sp_blockchain::{
 	Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata,
 };
-use sp_core::{hexdisplay::HexDisplay, storage::well_known_keys, Bytes};
+use sp_core::{hexdisplay::HexDisplay, storage::well_known_keys, traits::CallContext, Bytes};
 use sp_runtime::traits::{Block as BlockT, Header};
 use std::{marker::PhantomData, sync::Arc};
 
@@ -736,6 +736,7 @@ where
 					&function,
 					&call_parameters,
 					client.execution_extensions().strategies().other,
+					CallContext::Offchain,
 				)
 				.map(|result| {
 					let result = format!("0x{:?}", HexDisplay::from(&result));
diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs
index efd2085398238..f26d42484f24f 100644
--- a/client/rpc/src/state/state_full.rs
+++ b/client/rpc/src/state/state_full.rs
@@ -46,6 +46,7 @@ use sp_core::{
 	storage::{
 		ChildInfo, ChildType, PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey,
 	},
+	traits::CallContext,
 	Bytes,
 };
 use sp_runtime::traits::Block as BlockT;
@@ -207,6 +208,7 @@ where
 						&method,
 						&call_data,
 						self.client.execution_extensions().strategies().other,
+						CallContext::Offchain,
 					)
 					.map(Into::into)
 			})
diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs
index 8942b20bd91dc..82d3e36ac80ea 100644
--- a/client/service/src/client/call_executor.rs
+++ b/client/service/src/client/call_executor.rs
@@ -21,8 +21,11 @@ use sc_client_api::{
 	backend, call_executor::CallExecutor, execution_extensions::ExecutionExtensions, HeaderBackend,
 };
 use sc_executor::{RuntimeVersion, RuntimeVersionOf};
-use sp_api::{ExecutionContext, ProofRecorder, StorageTransactionCache};
-use sp_core::traits::{CodeExecutor, RuntimeCode, SpawnNamed};
+use sp_api::{ProofRecorder, StorageTransactionCache};
+use sp_core::{
+	traits::{CallContext, CodeExecutor, RuntimeCode, SpawnNamed},
+	ExecutionContext,
+};
 use sp_runtime::{generic::BlockId, traits::Block as BlockT};
 use sp_state_machine::{
 	backend::AsTrieBackend, ExecutionStrategy, Ext, OverlayedChanges, StateMachine, StorageProof,
@@ -166,6 +169,7 @@ where
 		method: &str,
 		call_data: &[u8],
 		strategy: ExecutionStrategy,
+		context: CallContext,
 	) -> sp_blockchain::Result<Vec<u8>> {
 		let mut changes = OverlayedChanges::default();
 		let at_number =
@@ -193,6 +197,7 @@ where
 			extensions,
 			&runtime_code,
 			self.spawn_handle.clone(),
+			context,
 		)
 		.set_parent_hash(at_hash);
 
@@ -216,6 +221,11 @@ where
 			self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(at_hash))?;
 		let state = self.backend.state_at(at_hash)?;
 
+		let call_context = match context {
+			ExecutionContext::OffchainCall(_) => CallContext::Offchain,
+			_ => CallContext::Onchain,
+		};
+
 		let (execution_manager, extensions) =
 			self.execution_extensions.manager_and_extensions(at_hash, at_number, context);
 
@@ -247,6 +257,7 @@ where
 					extensions,
 					&runtime_code,
 					self.spawn_handle.clone(),
+					call_context,
 				)
 				.with_storage_transaction_cache(storage_transaction_cache.as_deref_mut())
 				.set_parent_hash(at_hash);
@@ -262,6 +273,7 @@ where
 					extensions,
 					&runtime_code,
 					self.spawn_handle.clone(),
+					call_context,
 				)
 				.with_storage_transaction_cache(storage_transaction_cache.as_deref_mut())
 				.set_parent_hash(at_hash);
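`contextual_call` above derives the new `CallContext` from the `ExecutionContext` it already receives: only offchain-originating calls are treated as `Offchain`, while block construction, import and syncing keep the deterministic on-chain behaviour. Spelled out as a standalone helper (a sketch, not code from this patch):

```rust
use sp_core::{traits::CallContext, ExecutionContext};

// Sketch of the mapping used in `contextual_call` above.
fn to_call_context(context: &ExecutionContext) -> CallContext {
	match context {
		// Offchain workers and other offchain-originating calls.
		ExecutionContext::OffchainCall(_) => CallContext::Offchain,
		// Everything else (e.g. `BlockConstruction`, `Importing`, `Syncing`)
		// executes on-chain logic and keeps the on-chain heap configuration.
		_ => CallContext::Onchain,
	}
}
```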
diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs
index b2b029f226140..f8988c5a454c3 100644
--- a/client/service/test/src/client/mod.rs
+++ b/client/service/test/src/client/mod.rs
@@ -30,7 +30,7 @@ use sc_consensus::{
 use sc_service::client::{new_in_mem, Client, LocalCallExecutor};
 use sp_api::ProvideRuntimeApi;
 use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError, SelectChain};
-use sp_core::{testing::TaskExecutor, H256};
+use sp_core::{testing::TaskExecutor, traits::CallContext, H256};
 use sp_runtime::{
 	generic::BlockId,
 	traits::{BlakeTwo256, Block as BlockT, Header as HeaderT},
@@ -114,6 +114,7 @@ fn construct_block(
 		Default::default(),
 		&runtime_code,
 		task_executor.clone() as Box<_>,
+		CallContext::Onchain,
 	)
 	.execute(ExecutionStrategy::NativeElseWasm)
 	.unwrap();
@@ -128,6 +129,7 @@ fn construct_block(
 			Default::default(),
 			&runtime_code,
 			task_executor.clone() as Box<_>,
+			CallContext::Onchain,
 		)
 		.execute(ExecutionStrategy::NativeElseWasm)
 		.unwrap();
@@ -142,6 +144,7 @@ fn construct_block(
 		Default::default(),
 		&runtime_code,
 		task_executor.clone() as Box<_>,
+		CallContext::Onchain,
 	)
 	.execute(ExecutionStrategy::NativeElseWasm)
 	.unwrap();
@@ -213,6 +216,7 @@ fn construct_genesis_should_work_with_native() {
 		Default::default(),
 		&runtime_code,
 		TaskExecutor::new(),
+		CallContext::Onchain,
 	)
 	.execute(ExecutionStrategy::NativeElseWasm)
 	.unwrap();
@@ -246,6 +250,7 @@ fn construct_genesis_should_work_with_wasm() {
 		Default::default(),
 		&runtime_code,
 		TaskExecutor::new(),
+		CallContext::Onchain,
 	)
 	.execute(ExecutionStrategy::AlwaysWasm)
 	.unwrap();
@@ -279,6 +284,7 @@ fn construct_genesis_with_bad_transaction_should_panic() {
 		Default::default(),
 		&runtime_code,
 		TaskExecutor::new(),
+		CallContext::Onchain,
 	)
 	.execute(ExecutionStrategy::NativeElseWasm);
 	assert!(r.is_err());
diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs
index 3fe2a2ed3c394..0e42ee3ad4b12 100644
--- a/primitives/core/src/traits.rs
+++ b/primitives/core/src/traits.rs
@@ -24,13 +24,27 @@ use std::{
 
 pub use sp_externalities::{Externalities, ExternalitiesExt};
 
+/// The context in which a call is done.
+///
+/// Depending on the context, the executor may choose different heap sizes for the runtime
+/// instance.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
+pub enum CallContext {
+	/// The call is happening in some offchain context.
+	Offchain,
+	/// The call is happening in some on-chain context like building or importing a block.
+	Onchain,
+}
+
 /// Code execution engine.
 pub trait CodeExecutor: Sized + Send + Sync + ReadRuntimeVersion + Clone + 'static {
 	/// Externalities error type.
 	type Error: Display + Debug + Send + Sync + 'static;
 
-	/// Call a given method in the runtime. Returns a tuple of the result (either the output data
-	/// or an execution error) together with a `bool`, which is true if native execution was used.
+	/// Call a given method in the runtime.
+	///
+	/// Returns a tuple of the result (either the output data or an execution error) together with a
+	/// `bool`, which is true if native execution was used.
 	fn call(
 		&self,
 		ext: &mut dyn Externalities,
@@ -38,6 +52,7 @@ pub trait CodeExecutor: Sized + Send + Sync + ReadRuntimeVersion + Clone + 'stat
 		method: &str,
 		data: &[u8],
 		use_native: bool,
+		context: CallContext,
 	) -> (Result<Vec<u8>, Self::Error>, bool);
 }
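The new `CallContext` lets the executor distinguish on-chain execution (block building and import) from offchain callers (RPC, offchain workers, tooling), and, per the doc comment, it may pick a different heap size per context. One plausible wiring is sketched below; the struct and field names are illustrative assumptions, not the executor's actual internals.

```rust
// Path of `HeapAllocStrategy` assumed; `CallContext` is the type added above.
use sc_executor_common::wasm_runtime::HeapAllocStrategy;
use sp_core::traits::CallContext;

/// Illustrative only: per-context heap strategies an executor could hold.
struct HeapStrategies {
	onchain: HeapAllocStrategy,
	offchain: HeapAllocStrategy,
}

impl HeapStrategies {
	/// Pick the strategy for a call, keeping on-chain execution on the
	/// conservative, deterministic configuration.
	fn for_context(&self, context: CallContext) -> &HeapAllocStrategy {
		match context {
			CallContext::Onchain => &self.onchain,
			CallContext::Offchain => &self.offchain,
		}
	}
}
```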
 
diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs
index 538c63837920c..5fb300e12bae7 100644
--- a/primitives/state-machine/src/lib.rs
+++ b/primitives/state-machine/src/lib.rs
@@ -163,7 +163,7 @@ mod execution {
 	use sp_core::{
 		hexdisplay::HexDisplay,
 		storage::{ChildInfo, ChildType, PrefixedStorageKey},
-		traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed},
+		traits::{CallContext, CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed},
 	};
 	use sp_externalities::Extensions;
 	use std::{
@@ -295,6 +295,7 @@ mod execution {
 		///
 		/// Used for logging.
 		parent_hash: Option<H::Out>,
+		context: CallContext,
 	}
 
 	impl<'a, B, H, Exec> Drop for StateMachine<'a, B, H, Exec>
@@ -324,6 +325,7 @@ mod execution {
 			mut extensions: Extensions,
 			runtime_code: &'a RuntimeCode,
 			spawn_handle: impl SpawnNamed + Send + 'static,
+			context: CallContext,
 		) -> Self {
 			extensions.register(ReadRuntimeVersionExt::new(exec.clone()));
 			extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle));
@@ -339,6 +341,7 @@ mod execution {
 				runtime_code,
 				stats: StateMachineStats::default(),
 				parent_hash: None,
+				context,
 			}
 		}
 
@@ -408,6 +411,7 @@ mod execution {
 				self.method,
 				self.call_data,
 				use_native,
+				self.context,
 			);
 
 			self.overlay
@@ -574,6 +578,7 @@ mod execution {
 			extensions,
 			runtime_code,
 			spawn_handle,
+			CallContext::Offchain,
 		)
 		.execute_using_consensus_failure_handler::<_>(always_wasm())?;
 
@@ -638,6 +643,7 @@ mod execution {
 			Extensions::default(),
 			runtime_code,
 			spawn_handle,
+			CallContext::Offchain,
 		)
 		.execute_using_consensus_failure_handler(always_untrusted_wasm())
 	}
@@ -1318,7 +1324,7 @@ mod tests {
 		map,
 		storage::{ChildInfo, StateVersion},
 		testing::TaskExecutor,
-		traits::{CodeExecutor, Externalities, RuntimeCode},
+		traits::{CallContext, CodeExecutor, Externalities, RuntimeCode},
 	};
 	use sp_runtime::traits::BlakeTwo256;
 	use sp_trie::trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1};
@@ -1341,6 +1347,7 @@ mod tests {
 			_method: &str,
 			_data: &[u8],
 			use_native: bool,
+			_: CallContext,
 		) -> (CallResult<Self::Error>, bool) {
 			let using_native = use_native && self.native_available;
 			match (using_native, self.native_succeeds, self.fallback_succeeds) {
@@ -1388,6 +1395,7 @@ mod tests {
 			Default::default(),
 			&wasm_code,
 			TaskExecutor::new(),
+			CallContext::Offchain,
 		);
 
 		assert_eq!(state_machine.execute(ExecutionStrategy::NativeWhenPossible).unwrap(), vec![66]);
@@ -1416,6 +1424,7 @@ mod tests {
 			Default::default(),
 			&wasm_code,
 			TaskExecutor::new(),
+			CallContext::Offchain,
 		);
 
 		assert_eq!(state_machine.execute(ExecutionStrategy::NativeElseWasm).unwrap(), vec![66]);
@@ -1445,6 +1454,7 @@ mod tests {
 			Default::default(),
 			&wasm_code,
 			TaskExecutor::new(),
+			CallContext::Offchain,
 		);
 
 		assert!(state_machine
diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml
index 65c4ae42f044e..306a47e6dc1b7 100644
--- a/primitives/wasm-interface/Cargo.toml
+++ b/primitives/wasm-interface/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] }
 impl-trait-for-tuples = "0.2.2"
 log = { version = "0.4.17", optional = true }
-wasmi = { version = "0.13", optional = true }
+wasmi = { version = "0.13.2", optional = true }
 wasmtime = { version = "6.0.0", default-features = false, optional = true }
 anyhow = { version = "1.0.68", optional = true }
 sp-std = { version = "5.0.0", default-features = false, path = "../std" }
diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs
index 1fa3c7a00b9c5..720b0a1e06f07 100644
--- a/test-utils/runtime/src/lib.rs
+++ b/test-utils/runtime/src/lib.rs
@@ -1352,7 +1352,7 @@ mod tests {
 	use sc_block_builder::BlockBuilderProvider;
 	use sp_api::ProvideRuntimeApi;
 	use sp_consensus::BlockOrigin;
-	use sp_core::storage::well_known_keys::HEAP_PAGES;
+	use sp_core::{storage::well_known_keys::HEAP_PAGES, ExecutionContext};
 	use sp_state_machine::ExecutionStrategy;
 	use substrate_test_runtime_client::{
 		prelude::*, runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder,
@@ -1371,7 +1371,12 @@ mod tests {
 
 		// Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger
 		// than the heap.
-		let ret = client.runtime_api().vec_with_capacity(best_hash, 1048576);
+		let ret = client.runtime_api().vec_with_capacity_with_context(
+			best_hash,
+			// Use `ExecutionContext::Importing` to ensure we use the on-chain heap pages as configured above.
+			ExecutionContext::Importing,
+			1048576,
+		);
 		assert!(ret.is_err());
 
 		// Create a block that sets the `:heap_pages` to 32 pages of memory which corresponds to
diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs
index 76ddf11a85196..657e3c90fa6d3 100644
--- a/test-utils/runtime/src/system.rs
+++ b/test-utils/runtime/src/system.rs
@@ -354,7 +354,7 @@ mod tests {
 	use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod};
 	use sp_core::{
 		map,
-		traits::{CodeExecutor, RuntimeCode},
+		traits::{CallContext, CodeExecutor, RuntimeCode},
 	};
 	use sp_io::{hashing::twox_128, TestExternalities};
 	use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring};
@@ -438,7 +438,14 @@ mod tests {
 			};
 
 			executor()
-				.call(&mut ext, &runtime_code, "Core_execute_block", &b.encode(), false)
+				.call(
+					&mut ext,
+					&runtime_code,
+					"Core_execute_block",
+					&b.encode(),
+					false,
+					CallContext::Offchain,
+				)
 				.0
 				.unwrap();
 		})
@@ -540,7 +547,14 @@ mod tests {
 			};
 
 			executor()
-				.call(&mut ext, &runtime_code, "Core_execute_block", &b.encode(), false)
+				.call(
+					&mut ext,
+					&runtime_code,
+					"Core_execute_block",
+					&b.encode(),
+					false,
+					CallContext::Offchain,
+				)
 				.0
 				.unwrap();
 		})
diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs
index 3f0e9298c7713..60f078142e17f 100644
--- a/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -30,9 +30,12 @@ use sc_client_db::BenchmarkingState;
 use sc_executor::NativeElseWasmExecutor;
 use sc_service::{Configuration, NativeExecutionDispatch};
 use serde::Serialize;
-use sp_core::offchain::{
-	testing::{TestOffchainExt, TestTransactionPoolExt},
-	OffchainDbExt, OffchainWorkerExt, TransactionPoolExt,
+use sp_core::{
+	offchain::{
+		testing::{TestOffchainExt, TestTransactionPoolExt},
+		OffchainDbExt, OffchainWorkerExt, TransactionPoolExt,
+	},
+	traits::CallContext,
 };
 use sp_externalities::Extensions;
 use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr};
@@ -235,6 +238,7 @@ impl PalletCmd {
 			extensions(),
 			&sp_state_machine::backend::BackendRuntimeCode::new(state).runtime_code()?,
 			sp_core::testing::TaskExecutor::new(),
+			CallContext::Offchain,
 		)
 		.execute(strategy.into())
 		.map_err(|e| format!("{}: {}", ERROR_METADATA_NOT_FOUND, e))?;
@@ -372,6 +376,7 @@ impl PalletCmd {
 						&sp_state_machine::backend::BackendRuntimeCode::new(state)
 							.runtime_code()?,
 						sp_core::testing::TaskExecutor::new(),
+						CallContext::Offchain,
 					)
 					.execute(strategy.into())
 					.map_err(|e| {
@@ -412,6 +417,7 @@ impl PalletCmd {
 						&sp_state_machine::backend::BackendRuntimeCode::new(state)
 							.runtime_code()?,
 						sp_core::testing::TaskExecutor::new(),
+						CallContext::Offchain,
 					)
 					.execute(strategy.into())
 					.map_err(|e| format!("Error executing runtime benchmark: {}", e))?;
@@ -444,6 +450,7 @@ impl PalletCmd {
 						&sp_state_machine::backend::BackendRuntimeCode::new(state)
 							.runtime_code()?,
 						sp_core::testing::TaskExecutor::new(),
+						CallContext::Offchain,
 					)
 					.execute(strategy.into())
 					.map_err(|e| format!("Error executing runtime benchmark: {}", e))?;
diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs
index 04459ab5920d1..6fb4b44392546 100644
--- a/utils/frame/try-runtime/cli/src/lib.rs
+++ b/utils/frame/try-runtime/cli/src/lib.rs
@@ -378,7 +378,7 @@ use sp_core::{
 	},
 	storage::well_known_keys,
 	testing::TaskExecutor,
-	traits::{ReadRuntimeVersion, TaskExecutorExt},
+	traits::{CallContext, ReadRuntimeVersion, TaskExecutorExt},
 	twox_128, H256,
 };
 use sp_externalities::Extensions;
@@ -877,6 +877,7 @@ pub(crate) fn state_machine_call<Block: BlockT, HostFns: HostFunctions>(
 		extensions,
 		&sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?,
 		sp_core::testing::TaskExecutor::new(),
+		CallContext::Offchain,
 	)
 	.execute(sp_state_machine::ExecutionStrategy::AlwaysWasm)
 	.map_err(|e| format!("failed to execute '{}': {}", method, e))
@@ -916,6 +917,7 @@ pub(crate) fn state_machine_call_with_proof<Block: BlockT, HostFns: HostFunction
 		extensions,
 		&runtime_code,
 		sp_core::testing::TaskExecutor::new(),
+		CallContext::Offchain,
 	)
 	.execute(sp_state_machine::ExecutionStrategy::AlwaysWasm)
 	.map_err(|e| format!("failed to execute {}: {}", method, e))