From 27d900a7b4d638f9c47462605f06ffbd00043a7b Mon Sep 17 00:00:00 2001
From: Anton Voronov <anton.voronov@intel.com>
Date: Mon, 19 Dec 2022 00:37:25 +0000
Subject: [PATCH] sparsity: remove unused code

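Drop the unused CSR, CSC, BCSR and BCSC encodings together with the
multi-handle sparse memory API (dnnl_memory_create_sparse,
dnnl_memory_get_data_handles / dnnl_memory_set_data_handles,
dnnl_memory_map_data_sparse / dnnl_memory_unmap_data_sparse,
dnnl_memory_desc_get_size_sparse), their C++ counterparts, and the extra
fields of dnnl_sparse_desc_t (dims_order, nnze, metadata types, entry and
structure dims). Only the packed encoding remains, so dnnl_memory goes
back to holding a single memory storage and the dense create/handle/map
paths cover the sparse case as well.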
---
 include/oneapi/dnnl/dnnl.h                |  52 +------
 include/oneapi/dnnl/dnnl.hpp              | 144 +----------------
 include/oneapi/dnnl/dnnl_types.h          |  26 ----
 src/common/c_types_map.hpp                |   4 -
 src/common/dnnl_debug_autogenerated.cpp   |   4 -
 src/common/memory.cpp                     | 179 +---------------------
 src/common/memory.hpp                     |  52 +------
 src/common/memory_desc_wrapper.hpp        |  61 +-------
 src/common/primitive.hpp                  |   6 +-
 src/common/primitive_exec_types.cpp       |   5 +-
 src/common/primitive_exec_types.hpp       |   6 +-
 src/common/primitive_hashing_utils.cpp    |  16 --
 src/common/primitive_hashing_utils.hpp    |   9 --
 src/common/type_helpers.hpp               |  23 +--
 src/common/utils.hpp                      |   4 +-
 src/cpu/reorder/simple_sparse_reorder.hpp |   2 +-
 src/cpu/x64/jit_brgemm_inner_product.cpp  |   2 +-
 17 files changed, 34 insertions(+), 561 deletions(-)

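For reviewers, a minimal sketch of the C API as it looks after the removal.
The dense setup below (CPU engine, an arbitrary 16x64 f32 "ab" descriptor)
uses the stock dnnl_engine_create / dnnl_memory_desc_init_by_tag calls and is
purely illustrative; wiring the sparse descriptor into a memory descriptor
still goes through the existing sparse-desc based init routine, which this
patch does not change and which is omitted here.

    #include "oneapi/dnnl/dnnl.h"

    int main(void) {
        // A sparse descriptor now carries only the encoding kind.
        dnnl_sparse_desc_t sd;
        if (dnnl_sparse_desc_init(&sd, dnnl_sparse_encoding_packed)
                != dnnl_success)
            return 1;
        (void)sd; // Attaching it to a memory descriptor is omitted here.

        // Memory objects take the regular single-handle path;
        // dnnl_memory_create_sparse() and the multi-handle
        // get/set/map entry points are gone.
        dnnl_engine_t eng;
        dnnl_memory_desc_t md;
        dnnl_memory_t mem;
        dnnl_dims_t dims = {16, 64};

        dnnl_engine_create(&eng, dnnl_cpu, 0);
        dnnl_memory_desc_init_by_tag(&md, 2, dims, dnnl_f32, dnnl_ab);
        dnnl_memory_create(&mem, &md, eng, DNNL_MEMORY_ALLOCATE);

        dnnl_memory_destroy(mem);
        dnnl_engine_destroy(eng);
        return 0;
    }

On the C++ side the change mirrors this: memory::desc::sparse_desc::packed()
no longer takes an nnze argument and memory(md, engine) always allocates a
single buffer through the dense constructor.
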
diff --git a/include/oneapi/dnnl/dnnl.h b/include/oneapi/dnnl/dnnl.h
index 549208b2df9..86dd0bc7c44 100644
--- a/include/oneapi/dnnl/dnnl.h
+++ b/include/oneapi/dnnl/dnnl.h
@@ -1169,11 +1169,7 @@ dnnl_status_t DNNL_API dnnl_memory_desc_init_by_tag(
 /// @returns #dnnl_success on success and a status describing the error
 ///     otherwise.
 dnnl_status_t DNNL_API dnnl_sparse_desc_init(dnnl_sparse_desc_t *sparse_desc,
-        dnnl_sparse_encoding_t encoding, int ndims_order,
-        const dnnl_dims_t dims_order, dnnl_dim_t nnze, int ntypes,
-        const dnnl_data_type_t *metadata_types, int nentry_dims,
-        const dnnl_dim_t *entry_dims, int structure_ndims,
-        const dnnl_dim_t *structure_dims, const dnnl_dim_t *structure_nnz);
+        dnnl_sparse_encoding_t encoding);
 
 /// Initializes a memory descriptor by the given sparse descriptor.
 ///
@@ -1316,17 +1312,6 @@ int DNNL_API dnnl_memory_desc_equal(
 size_t DNNL_API dnnl_memory_desc_get_size(
         const dnnl_memory_desc_t *memory_desc);
 
-/// Returns the size of a values and metadata for a particular sparse encoding.
-///
-/// @param memory_desc Memory descriptor.
-/// @param index Index that correspondes to values or metadata. Each sparse
-///     encoding defines index interpretation.
-///
-/// @returns The number of bytes required for values or metadata for a
-///     particular sparse encoding described by a memory descriptor.
-size_t DNNL_API dnnl_memory_desc_get_size_sparse(
-        const dnnl_memory_desc_t *memory_desc, int index);
-
 /// Returns the size of data type.
 ///
 /// @param data_type Data type.
@@ -1357,27 +1342,6 @@ dnnl_status_t DNNL_API dnnl_memory_create(dnnl_memory_t *memory,
         const dnnl_memory_desc_t *memory_desc, dnnl_engine_t engine,
         void *handle);
 
-/// Creates a sparse memory object.
-///
-/// @param memory Output memory object.
-/// @param memory_desc Memory descriptor.
-/// @param engine Engine to use.
-/// @param nhandles Number of handles.
-/// @param handles Handles of the memory buffers to use as underlying storages.
-///     For each element of the @p handles array the following applies:
-///     - A pointer to the user-allocated buffer. In this case the library
-///       doesn't own the buffer.
-///     - The DNNL_MEMORY_ALLOCATE special value. Instructs the library to
-///       allocate the buffer for the memory object. In this case the library
-///       owns the buffer.
-///     - DNNL_MEMORY_NONE Instructs the library to skip allocation of the
-///       memory buffer.
-/// @returns #dnnl_success on success and a status describing the error
-///     otherwise.
-dnnl_status_t DNNL_API dnnl_memory_create_sparse(dnnl_memory_t *memory,
-        const dnnl_memory_desc_t *memory_desc, dnnl_engine_t engine,
-        dnnl_dim_t nhandles, void **handles);
-
 /// Returns the memory descriptor for a memory object.
 ///
 /// @param memory Memory object.
@@ -1439,13 +1403,6 @@ dnnl_status_t DNNL_API dnnl_memory_map_data(
 dnnl_status_t DNNL_API dnnl_memory_unmap_data(
         const_dnnl_memory_t memory, void *mapped_ptr);
 
-// TODO: add documentation and put these in the right place.
-dnnl_status_t DNNL_API dnnl_memory_map_data_sparse(
-        const_dnnl_memory_t memory, int index, void **mapped_ptr);
-
-dnnl_status_t DNNL_API dnnl_memory_unmap_data_sparse(
-        const_dnnl_memory_t memory, int index, void *mapped_ptr);
-
 /// Returns memory object's data handle.
 ///
 /// @param memory Memory object.
@@ -1470,13 +1427,6 @@ dnnl_status_t DNNL_API dnnl_memory_set_data_handle(
 
 dnnl_status_t DNNL_API dnnl_memory_set_data_handle_no_pads_proc(
         dnnl_memory_t memory, void *handle);
-        
-// TODO: add documentation and put these in the right place.
-dnnl_status_t DNNL_API dnnl_memory_get_data_handles(
-        const_dnnl_memory_t memory, dnnl_dim_t *nhandles, void **handles);
-
-dnnl_status_t DNNL_API dnnl_memory_set_data_handles(
-        dnnl_memory_t memory, dnnl_dim_t nhandles, void **handles);
 
 /// Sets the underlying memory buffer.
 ///
diff --git a/include/oneapi/dnnl/dnnl.hpp b/include/oneapi/dnnl/dnnl.hpp
index ddcb253b672..04bc2ea442f 100644
--- a/include/oneapi/dnnl/dnnl.hpp
+++ b/include/oneapi/dnnl/dnnl.hpp
@@ -2497,22 +2497,8 @@ struct memory : public handle<dnnl_memory_t> {
     /// A memory descriptor.
     struct desc {
         struct sparse_desc {
-            sparse_desc(dnnl_sparse_encoding_t encoding, const dims &dims_order,
-                    dim nnze, const std::vector<data_type> &metadata_types,
-                    const dims &entry_dims, const dims &structure_dims,
-                    const dims &structure_nnz, bool allow_empty = false) {
-                std::vector<dnnl_data_type_t> c_metadata_types(
-                        metadata_types.size());
-                for (size_t i = 0; i < c_metadata_types.size(); i++) {
-                    c_metadata_types[i] = convert_to_c(metadata_types[i]);
-                }
+            sparse_desc(dnnl_sparse_encoding_t encoding, bool allow_empty = false) {
-                // TODO: check structure_dims.size() == structure_nnz.size();
-                dnnl_status_t status = dnnl_sparse_desc_init(&data, encoding,
-                        (int)dims_order.size(), dims_order.data(), nnze,
-                        (int)c_metadata_types.size(), c_metadata_types.data(),
-                        (int)entry_dims.size(), entry_dims.data(),
-                        (int)structure_dims.size(), structure_dims.data(),
-                        structure_nnz.data());
+                dnnl_status_t status = dnnl_sparse_desc_init(&data, encoding);
                 if (!allow_empty)
                     error::wrap_c_api(
                             status, "could not construct a sparse descriptor");
@@ -2602,41 +2589,8 @@ struct memory : public handle<dnnl_memory_t> {
                         "sparse descriptor");
         }
 
-        /// Function for creating CSR sparse desc with unstructured sparsity.
-        static sparse_desc csr(dim nnze, data_type index_type,
-                data_type pointer_type, bool allow_empty = false) {
-            return sparse_desc(dnnl_sparse_encoding_csr, {0, 1}, nnze,
-                    {index_type, pointer_type}, {}, {}, {}, allow_empty);
-        }
-
-        /// Function for creating CSC sparse desc with unstructured sparsity.
-        static sparse_desc csc(dim nnze, data_type index_type,
-                data_type pointer_type, bool allow_empty = false) {
-            return sparse_desc(dnnl_sparse_encoding_csc, {1, 0}, nnze,
-                    {index_type, pointer_type}, {}, {}, {}, allow_empty);
-        }
-
-        /// Function for creating BCSR sparse desc with unstructured sparsity.
-        static sparse_desc bcsr(dim nnze, data_type index_type,
-                data_type pointer_type, const dims &block_dims,
-                bool allow_empty = false) {
-            return sparse_desc(dnnl_sparse_encoding_bcsr, {0, 1}, nnze,
-                    {index_type, pointer_type}, block_dims, {}, {},
-                    allow_empty);
-        }
-
-        /// Function for creating BCSC sparse desc unstructured sparsity.
-        static sparse_desc bcsc(dim nnze, data_type index_type,
-                data_type pointer_type, const dims &block_dims,
-                bool allow_empty = false) {
-            return sparse_desc(dnnl_sparse_encoding_bcsc, {1, 0}, nnze,
-                    {index_type, pointer_type}, block_dims, {}, {},
-                    allow_empty);
-        }
-
-        static sparse_desc packed(dim nnze, bool allow_empty = false) {
-            return sparse_desc(dnnl_sparse_encoding_packed, {}, nnze, {}, {},
-                    {}, {}, allow_empty);
+        static sparse_desc packed(bool allow_empty = false) {
+            return sparse_desc(dnnl_sparse_encoding_packed, allow_empty);
         }
 
         /// Constructs a memory descriptor for a region inside an area
@@ -2786,18 +2740,6 @@ struct memory : public handle<dnnl_memory_t> {
         ///     including the padding area.
         size_t get_size() const { return dnnl_memory_desc_get_size(&data); }
 
-        /// Returns the size of a values and metadata for a particular sparse
-        /// encoding.
-        ///
-        /// @param index Index that correspondes to values or metadata.
-        ///     Each sparse encoding defines index interpretation.
-        ///
-        /// @returns The number of bytes required for values or metadata for a
-        ///     particular sparse encoding described by a memory descriptor.
-        size_t get_size(int index) const {
-            return dnnl_memory_desc_get_size_sparse(&data, index);
-        }
-
         /// Checks whether the memory descriptor is zero (empty).
         /// @returns @c true if the memory descriptor describes an empty
         ///     memory and @c false otherwise.
@@ -2858,44 +2800,12 @@ struct memory : public handle<dnnl_memory_t> {
 
     /// Constructs a memory object.
     ///
-    /// The underlying buffer(s) for the memory will be allocated by the
-    /// library.
+    /// The underlying buffer for the memory will be allocated by the library.
     ///
     /// @param md Memory descriptor.
     /// @param aengine Engine to store the data on.
-    memory(const desc &md, const engine &aengine) {
-        dnnl_status_t status;
-        dnnl_memory_t result;
-        const bool is_sparse_md = md.data.format_kind == dnnl_format_sparse;
-        if (is_sparse_md) {
-            // Deduce number of handles.
-            dim nhandles = 0;
-            switch (md.data.format_desc.sparse_desc.encoding) {
-                case dnnl_sparse_encoding_csr:
-                case dnnl_sparse_encoding_csc:
-                case dnnl_sparse_encoding_bcsr:
-                case dnnl_sparse_encoding_bcsc: nhandles = 3; break;
-                case dnnl_sparse_encoding_packed: nhandles = 1; break;
-                default: nhandles = 0;
-            }
-            std::vector<void *> handles(nhandles, DNNL_MEMORY_ALLOCATE);
-            status = dnnl_memory_create_sparse(&result, &md.data, aengine.get(),
-                    (dim)handles.size(), handles.data());
-        } else {
-            status = dnnl_memory_create(
-                    &result, &md.data, aengine.get(), DNNL_MEMORY_ALLOCATE);
-        }
-        error::wrap_c_api(status, "could not create a memory object");
-        reset(result);
-    }
-
-    memory(const desc &md, const engine &aengine, std::vector<void *> handles) {
-        dnnl_memory_t result;
-        dnnl_status_t status = dnnl_memory_create_sparse(&result, &md.data,
-                aengine.get(), (dim)handles.size(), handles.data());
-        error::wrap_c_api(status, "could not create a memory object");
-        reset(result);
-    }
+    memory(const desc &md, const engine &aengine)
+        : memory(md, aengine, DNNL_MEMORY_ALLOCATE) {}
 
     /// Returns the associated memory descriptor.
     desc get_desc() const {
@@ -2924,28 +2834,6 @@ struct memory : public handle<dnnl_memory_t> {
         return handle;
     }
 
-    // TODO: add documentation.
-    std::vector<void *> get_data_handles() const {
-        dim nhandles;
-        error::wrap_c_api(
-                dnnl_memory_get_data_handles(get(), &nhandles, nullptr),
-                "could not get a number of native handles from a memory "
-                "object");
-        std::vector<void *> handles(nhandles);
-        error::wrap_c_api(
-                dnnl_memory_get_data_handles(get(), &nhandles, handles.data()),
-                "could not get native handles from a memory object");
-        return handles;
-    }
-
-    // TODO: add documentation.
-    void set_data_handles(std::vector<void *> handles) {
-        dim nhandles = handles.size();
-        error::wrap_c_api(
-                dnnl_memory_set_data_handles(get(), nhandles, handles.data()),
-                "could not set native handles of a memory object");
-    }
-
     /// Sets the underlying memory buffer.
     ///
     /// This function may write zero values to the memory specified by the @p
@@ -3031,23 +2919,6 @@ struct memory : public handle<dnnl_memory_t> {
         return static_cast<T *>(mapped_ptr);
     }
 
-    // TODO: add documentation.
-    template <typename T = void>
-    T *map_data(int index) const {
-        void *mapped_ptr;
-        error::wrap_c_api(
-                dnnl_memory_map_data_sparse(get(), index, &mapped_ptr),
-                "could not map memory object data");
-        return static_cast<T *>(mapped_ptr);
-    }
-
-    // TODO: add documentation.
-    void unmap_data(int index, void *mapped_ptr) const {
-        error::wrap_c_api(
-                dnnl_memory_unmap_data_sparse(get(), index, mapped_ptr),
-                "could not unmap memory object data");
-    }
-
     /// Unmaps a memory object and writes back any changes made to the
     /// previously mapped memory buffer.
     ///
diff --git a/include/oneapi/dnnl/dnnl_types.h b/include/oneapi/dnnl/dnnl_types.h
index 15237bb4050..e154255a377 100644
--- a/include/oneapi/dnnl/dnnl_types.h
+++ b/include/oneapi/dnnl/dnnl_types.h
@@ -1823,38 +1823,12 @@ typedef struct {
 typedef enum {
     dnnl_sparse_encoding_undef = 0,
     dnnl_sparse_encoding_any,
-    dnnl_sparse_encoding_csr,
-    dnnl_sparse_encoding_csc,
-    dnnl_sparse_encoding_bcsr,
-    dnnl_sparse_encoding_bcsc,
     dnnl_sparse_encoding_packed,
 } dnnl_sparse_encoding_t;
 
-/// Maximum number of types for metadata.
-#define DNNL_MAX_METADATA_TYPES 12
-
 typedef struct {
     /// Specifies what encoding is used.
     dnnl_sparse_encoding_t encoding;
-    /// Order of dimensions. E.g. for CSR it's [0, 1], for CSC [1, 0].
-    dnnl_dims_t dims_order;
-    /// Number of non-zero entries.
-    dnnl_dim_t nnze;
-    /// Metadata types. Each encoding defines how to interpret these.
-    dnnl_data_type_t metadata_types[DNNL_MAX_METADATA_TYPES];
-    /// Dimensions of an entry. For example: 1x1 for CSR/CSC or MxN for
-    /// BCSR/BCSC.
-    dnnl_dim_t entry_dims[2];
-
-    /// Section that describes sparsity pattern.
-    ///
-    /// Number of dimensions of a structure block. When ndims is 0 then sparsity
-    /// pattern is considered unstructured.
-    int structure_ndims;
-    /// Dimensions of a structure block.
-    dnnl_dim_t structure_dims[2];
-    /// Number of non-zero elements per-dimension.
-    dnnl_dim_t structure_nnz[2];
     /// Descriptor for blocked bitmask - opaque.
     dnnl_blocking_desc_t packed_desc;
 } dnnl_sparse_desc_t;
diff --git a/src/common/c_types_map.hpp b/src/common/c_types_map.hpp
index b6775967bdf..ed5381f5607 100644
--- a/src/common/c_types_map.hpp
+++ b/src/common/c_types_map.hpp
@@ -1221,10 +1221,6 @@ using sparse_encoding_t = dnnl_sparse_encoding_t;
 namespace sparse_encoding {
 const sparse_encoding_t undef = dnnl_sparse_encoding_undef;
 const sparse_encoding_t any = dnnl_sparse_encoding_any;
-const sparse_encoding_t csr = dnnl_sparse_encoding_csr;
-const sparse_encoding_t csc = dnnl_sparse_encoding_csc;
-const sparse_encoding_t bcsr = dnnl_sparse_encoding_bcsr;
-const sparse_encoding_t bcsc = dnnl_sparse_encoding_bcsc;
 const sparse_encoding_t packed = dnnl_sparse_encoding_packed;
 } // namespace sparse_encoding
 
diff --git a/src/common/dnnl_debug_autogenerated.cpp b/src/common/dnnl_debug_autogenerated.cpp
index ed722780d3b..3f43a3c41b3 100644
--- a/src/common/dnnl_debug_autogenerated.cpp
+++ b/src/common/dnnl_debug_autogenerated.cpp
@@ -1212,10 +1212,6 @@ const char *dnnl_alg_kind2str(dnnl_alg_kind_t v) {
 const char *dnnl_sparse_encoding2str(dnnl_sparse_encoding_t v) {
     if (v == dnnl_sparse_encoding_undef) return "undef";
     if (v == dnnl_sparse_encoding_any) return "any";
-    if (v == dnnl_sparse_encoding_csr) return "sparse_encoding_csr";
-    if (v == dnnl_sparse_encoding_csc) return "sparse_encoding_csc";
-    if (v == dnnl_sparse_encoding_bcsr) return "sparse_encoding_bcsr";
-    if (v == dnnl_sparse_encoding_bcsc) return "sparse_encoding_bcsc";
     if (v == dnnl_sparse_encoding_packed) return "sparse_encoding_packed";
     assert(!"unknown sparse_encoding");
     return "unknown sparse_encoding";
diff --git a/src/common/memory.cpp b/src/common/memory.cpp
index ac98234b33e..d0696c7051e 100644
--- a/src/common/memory.cpp
+++ b/src/common/memory.cpp
@@ -57,7 +57,7 @@ namespace {
 //    no one will do concurrent mapping for overlapping memory objects.
 //
 // XXX: remove limitation mentioned in 2nd bullet.
-size_t memory_desc_map_size(const memory_desc_t *md, int index = 0) {
+size_t memory_desc_map_size(const memory_desc_t *md) {
     auto mdw = memory_desc_wrapper(md);
 
     if (mdw.has_runtime_dims_or_strides()) return DNNL_RUNTIME_SIZE_VAL;
@@ -65,31 +65,11 @@ size_t memory_desc_map_size(const memory_desc_t *md, int index = 0) {
 
     memory_desc_t md_no_offset0 = *md;
     md_no_offset0.offset0 = 0;
-    return memory_desc_wrapper(md_no_offset0).size(index)
+    return memory_desc_wrapper(md_no_offset0).size()
             + md->offset0 * mdw.data_type_size();
 }
 } // namespace
 
-dnnl_memory::dnnl_memory(dnnl::impl::engine_t *engine,
-        const dnnl::impl::memory_desc_t *md, const std::vector<unsigned> &flags,
-        std::vector<void *> &handles)
-    : engine_(engine), md_(*md) {
-
-    const size_t nhandles = handles.size();
-    std::vector<std::unique_ptr<dnnl::impl::memory_storage_t>> mem_storages(
-            nhandles);
-    for (size_t i = 0; i < nhandles; i++) {
-        const size_t size = memory_desc_wrapper(md_).size(i);
-        memory_storage_t *memory_storage_ptr;
-        status_t status = engine->create_memory_storage(
-                &memory_storage_ptr, flags[i], size, handles[i]);
-        if (status != success) return;
-        mem_storages[i].reset(memory_storage_ptr);
-    }
-
-    memory_storages_ = std::move(mem_storages);
-}
-
 dnnl_memory::dnnl_memory(dnnl::impl::engine_t *engine,
         const dnnl::impl::memory_desc_t *md, unsigned flags, void *handle)
     : engine_(engine), md_(*md) {
@@ -100,7 +80,7 @@ dnnl_memory::dnnl_memory(dnnl::impl::engine_t *engine,
             &memory_storage_ptr, flags, size, handle);
     if (status != success) return;
 
-    memory_storages_.emplace_back(memory_storage_ptr);
+    memory_storage_.reset(memory_storage_ptr);
 }
 
 dnnl_memory::dnnl_memory(dnnl::impl::engine_t *engine,
@@ -117,7 +97,7 @@ status_t dnnl_memory::set_data_handle(void *handle, stream_t *stream, bool pads_
     CHECK(memory_storage()->get_data_handle(&old_handle));
 
     if (handle != old_handle) {
-        CHECK(memory_storages_[0]->set_data_handle(handle));
+        CHECK(memory_storage_->set_data_handle(handle));
     }
 
     memory_arg_t mem_arg = {this, true};
@@ -128,20 +108,14 @@ status_t dnnl_memory::set_data_handle(void *handle, stream_t *stream, bool pads_
 status_t dnnl_memory::reset_memory_storage(
         std::unique_ptr<dnnl::impl::memory_storage_t> &&memory_storage) {
     if (memory_storage) {
-        if (memory_storages_.empty())
-            memory_storages_.emplace_back(std::move(memory_storage));
-        else
-            memory_storages_[0] = std::move(memory_storage);
+        memory_storage_ = std::move(memory_storage);
     } else {
         memory_storage_t *memory_storage_ptr;
         status_t status = engine_->create_memory_storage(
                 &memory_storage_ptr, use_runtime_ptr, 0, nullptr);
         if (status != status::success) return status;
 
-        if (memory_storages_.empty())
-            memory_storages_.emplace_back(memory_storage_ptr);
-        else
-            memory_storages_[0].reset(memory_storage_ptr);
+        memory_storage_.reset(memory_storage_ptr);
     }
 
     return status::success;
@@ -229,36 +203,11 @@ status_t dnnl_memory_desc_init_by_strides(memory_desc_t *memory_desc, int ndims,
     return success;
 }
 
-status_t dnnl_sparse_desc_init(sparse_desc_t *sparse_desc,
-        sparse_encoding_t encoding, int ndims_order, const dims_t dims_order,
-        dim_t nnze, int ntypes, const data_type_t *metadata_types,
-        int nentry_dims, const dim_t *entry_dims, int structure_ndims,
-        const dim_t *structure_dims, const dim_t *structure_nnz) {
+status_t dnnl_sparse_desc_init(sparse_desc_t *sparse_desc, sparse_encoding_t encoding) {
     if (!sparse_desc) return invalid_arguments;
-    if (ntypes > 0 && !metadata_types) return invalid_arguments;
-    if (nentry_dims > 0 && !entry_dims) return invalid_arguments;
-    if (structure_ndims > 0 && (!structure_dims || !structure_nnz))
-        return invalid_arguments;
-
-    // sparse descriptor is empty.
-    if (nnze == 0) {
-        (*sparse_desc) = sparse_desc_t();
-        return success;
-    }
-
-    // TODO: add more checks
 
     auto sd = sparse_desc_t();
     sd.encoding = encoding;
-    array_copy(sd.dims_order, dims_order, ndims_order);
-    sd.nnze = nnze;
-    array_copy(sd.metadata_types, metadata_types, ntypes);
-    array_copy(sd.entry_dims, entry_dims, nentry_dims);
-    if (structure_ndims > 0) {
-        sd.structure_ndims = structure_ndims;
-        array_copy(sd.structure_dims, structure_dims, structure_ndims);
-        array_copy(sd.structure_nnz, structure_nnz, structure_ndims);
-    }
 
     *sparse_desc = sd;
 
@@ -584,11 +533,6 @@ size_t dnnl_memory_desc_get_size(const memory_desc_t *md) {
     return memory_desc_wrapper(*md).size();
 }
 
-size_t dnnl_memory_desc_get_size_sparse(
-        const dnnl_memory_desc_t *md, int index) {
-    return memory_desc_wrapper(*md).size(index);
-}
-
 size_t dnnl_data_type_size(dnnl_data_type_t data_type) {
     return types::data_type_size(data_type);
 }
@@ -626,46 +570,6 @@ status_t dnnl_memory_create(memory_t **memory, const memory_desc_t *md,
     return success;
 }
 
-status_t dnnl_memory_create_sparse(dnnl_memory_t *memory,
-        const dnnl_memory_desc_t *md, dnnl_engine_t engine, dnnl_dim_t nhandles,
-        void **handles) {
-    assert(DNNL_CPU_RUNTIME != DNNL_RUNTIME_SYCL);
-    assert(DNNL_GPU_RUNTIME != DNNL_RUNTIME_SYCL);
-    assert(DNNL_GPU_RUNTIME != DNNL_RUNTIME_OCL);
-
-    // TODO: consider combinin part of functionality with non-sparse
-    // counterpart above.
-    memory_desc_t z_md = types::zero_md();
-    if (md == nullptr) md = &z_md;
-
-    const auto mdw = memory_desc_wrapper(md);
-    if (mdw.format_any() || mdw.has_runtime_dims_or_strides())
-        return invalid_arguments;
-
-    std::vector<unsigned> flags(nhandles);
-    std::vector<void *> handles_(nhandles);
-    for (size_t i = 0; i < handles_.size(); i++) {
-        unsigned f = (handles[i] == DNNL_MEMORY_ALLOCATE)
-                ? memory_flags_t::alloc
-                : memory_flags_t::use_runtime_ptr;
-        void *handle_ptr
-                = (handles[i] == DNNL_MEMORY_ALLOCATE) ? nullptr : handles[i];
-        flags[i] = f;
-        handles_[i] = handle_ptr;
-    }
-
-    auto _memory = new memory_t(engine, md, flags, handles_);
-    if (_memory == nullptr) return out_of_memory;
-    for (size_t i = 0; i < handles_.size(); i++) {
-        if (_memory->memory_storage(i) == nullptr) {
-            delete _memory;
-            return out_of_memory;
-        }
-    }
-    *memory = _memory;
-    return success;
-}
-
 status_t dnnl_memory_get_memory_desc(
         const memory_t *memory, const memory_desc_t **md) {
     if (any_null(memory, md)) return invalid_arguments;
@@ -696,42 +600,6 @@ status_t dnnl_memory_set_data_handle_no_pads_proc(memory_t *memory, void *handle
     return dnnl_memory_set_data_handle_v2_no_pads_proc(memory, handle, nullptr);
 }
 
-status_t dnnl_memory_get_data_handles(
-        const_dnnl_memory_t memory, dnnl_dim_t *nhandles, void **handles) {
-    if (!nhandles) return invalid_arguments;
-
-    // User queries number of handles without a valid memory object.
-    if (!memory) {
-        (*nhandles) = 0;
-        return success;
-    }
-
-    std::vector<void *> queried_handles;
-    // User queries number of handles with a valid memory object.
-    if (!handles) {
-        memory->get_data_handles(queried_handles);
-        (*nhandles) = queried_handles.size();
-        return success;
-    }
-
-    // User queries the handles.
-    memory->get_data_handles(queried_handles);
-    if ((*nhandles) != (int)queried_handles.size()) return invalid_arguments;
-    for (size_t i = 0; i < queried_handles.size(); i++) {
-        handles[i] = queried_handles[i];
-    }
-
-    return success;
-}
-
-status_t dnnl_memory_set_data_handles(
-        dnnl_memory_t memory, dnnl_dim_t nhandles, void **handles) {
-    if (any_null(memory, handles) || nhandles == 0) return invalid_arguments;
-    if ((int)memory->get_num_handles() != nhandles) return invalid_arguments;
-    std::vector<void *> handles_vec(handles, handles + nhandles);
-    return memory->set_data_handles(std::move(handles_vec), nullptr);
-}
-
 status_t dnnl_memory_set_data_handle_v2(
         memory_t *memory, void *handle, stream_t *stream) {
     if (any_null(memory)) return invalid_arguments;
@@ -775,39 +643,6 @@ status_t dnnl_memory_unmap_data(const memory_t *memory, void *mapped_ptr) {
     return memory->memory_storage()->unmap_data(mapped_ptr, nullptr);
 }
 
-status_t dnnl_memory_map_data_sparse(
-        const_dnnl_memory_t memory, int index, void **mapped_ptr) {
-    bool args_ok = !any_null(memory, mapped_ptr);
-    if (!args_ok) return invalid_arguments;
-    // TODO: add index check.
-
-    const memory_desc_t *md = memory->md();
-    // See caveats in the comment to `memory_desc_map_size()` function.
-    const size_t map_size = memory_desc_map_size(md, index);
-
-    if (map_size == 0) {
-        *mapped_ptr = nullptr;
-        return success;
-    } else if (map_size == DNNL_RUNTIME_SIZE_VAL) {
-        return invalid_arguments;
-    }
-
-    return memory->memory_storage(index)->map_data(
-            mapped_ptr, nullptr, map_size);
-
-    return unimplemented;
-}
-
-status_t dnnl_memory_unmap_data_sparse(
-        const_dnnl_memory_t memory, int index, void *mapped_ptr) {
-    bool args_ok = !any_null(memory);
-    if (!args_ok) return invalid_arguments;
-
-    return memory->memory_storage(index)->unmap_data(mapped_ptr, nullptr);
-
-    return unimplemented;
-}
-
 status_t dnnl_memory_destroy(memory_t *memory) {
     delete memory;
     return success;
diff --git a/src/common/memory.hpp b/src/common/memory.hpp
index ff480e2dc99..5e0791a4023 100644
--- a/src/common/memory.hpp
+++ b/src/common/memory.hpp
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2018-2022 Intel Corporation
+* Copyright 2018-2021 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -40,9 +40,6 @@ enum memory_flags_t { alloc = 0x1, use_runtime_ptr = 0x2 };
 struct dnnl_memory : public dnnl::impl::c_compatible {
     /** XXX: Parameter flags must contain either alloc or use_runtime_ptr from
      * memory_flags_t. */
-    dnnl_memory(dnnl::impl::engine_t *engine,
-            const dnnl::impl::memory_desc_t *md,
-            const std::vector<unsigned> &flags, std::vector<void *> &handles);
     dnnl_memory(dnnl::impl::engine_t *engine,
             const dnnl::impl::memory_desc_t *md, unsigned flags, void *handle);
     dnnl_memory(dnnl::impl::engine_t *engine,
@@ -55,61 +52,27 @@ struct dnnl_memory : public dnnl::impl::c_compatible {
     /** returns memory's description */
     const dnnl::impl::memory_desc_t *md() const { return &md_; }
     /** returns the underlying memory storage */
-    dnnl::impl::memory_storage_t *memory_storage(int index = 0) const {
-        if (index >= (int)memory_storages_.size()) return nullptr;
-        return memory_storages_[index].get();
+    dnnl::impl::memory_storage_t *memory_storage() const {
+        return memory_storage_.get();
     }
     /** returns the underlying memory storage */
     dnnl::impl::memory_storage_t *memory_storage_clean(
             const dnnl::impl::exec_ctx_t &ctx,
             dnnl::impl::status_t &status) const {
         status = zero_pad(ctx);
-        return memory_storages_[0].get();
+        return memory_storage_.get();
     }
     /** returns the underlying memory storage */
     dnnl::impl::memory_storage_t *memory_storage_clean(
             const dnnl::impl::exec_ctx_t &ctx) const {
         zero_pad(ctx);
-        return memory_storages_[0].get();
+        return memory_storage_.get();
     }
     /** returns data handle */
     dnnl::impl::status_t get_data_handle(void **handle) const {
         return memory_storage()->get_data_handle(handle);
     }
 
-    dnnl::impl::status_t get_data_handles(std::vector<void *> &handles) const {
-        std::vector<void *> handles_tmp(memory_storages_.size());
-        handles = std::vector<void *>(memory_storages_.size());
-        for (size_t i = 0; i < memory_storages_.size(); i++) {
-            CHECK(memory_storage(i)->get_data_handle(&handles_tmp[i]));
-        }
-        handles = std::move(handles_tmp);
-        return dnnl::impl::status::success;
-    }
-
-    dnnl::impl::status_t set_data_handles(
-            std::vector<void *> handles, dnnl_stream *stream) {
-        if (handles.size() != memory_storages_.size())
-            return dnnl::impl::status::invalid_arguments;
-
-        auto status = dnnl::impl::status::success;
-        std::vector<void *> current_handles(handles.size());
-
-        for (size_t i = 0; i < handles.size(); i++) {
-            memory_storage(i)->get_data_handle(&current_handles[i]);
-            status = memory_storage(i)->set_data_handle(handles[i]);
-            if (status != dnnl::impl::status::success) {
-                // Restore the changed handles.
-                for (size_t j = 0; j < i; j++) {
-                    CHECK(memory_storage(j)->set_data_handle(
-                            current_handles[j]));
-                }
-                break;
-            }
-        }
-        return status;
-    }
-
     /** sets data handle */
     dnnl::impl::status_t set_data_handle(void *handle, dnnl_stream *stream, bool pads_zeroing);
 
@@ -119,8 +82,6 @@ struct dnnl_memory : public dnnl::impl::c_compatible {
     dnnl::impl::status_t reset_memory_storage(
             std::unique_ptr<dnnl::impl::memory_storage_t> &&memory_storage);
 
-    size_t get_num_handles() const { return memory_storages_.size(); }
-
 protected:
     dnnl::impl::engine_t *engine_;
     const dnnl::impl::memory_desc_t md_;
@@ -129,8 +90,7 @@ struct dnnl_memory : public dnnl::impl::c_compatible {
     dnnl_memory() = delete;
     DNNL_DISALLOW_COPY_AND_ASSIGN(dnnl_memory);
 
-    // Number of storages is larger than 1 only for sparse memory.
-    std::vector<std::unique_ptr<dnnl::impl::memory_storage_t>> memory_storages_;
+    std::unique_ptr<dnnl::impl::memory_storage_t> memory_storage_;
 };
 
 #endif
diff --git a/src/common/memory_desc_wrapper.hpp b/src/common/memory_desc_wrapper.hpp
index 5873599238f..b1282ccca93 100644
--- a/src/common/memory_desc_wrapper.hpp
+++ b/src/common/memory_desc_wrapper.hpp
@@ -88,18 +88,8 @@ struct memory_desc_wrapper : public c_compatible {
         return md_->format_desc.sparse_desc;
     }
 
-    const data_type_t *metadata_types() const {
-        assert(is_sparse_desc());
-        return sparse_desc().metadata_types;
-    }
-
     const memory_extra_desc_t &extra() const { return md_->extra; }
 
-    const dim_t *entry_dims() const {
-        assert(is_sparse_desc());
-        return sparse_desc().entry_dims;
-    }
-
     sparse_encoding_t encoding() const {
         assert(is_sparse_desc());
         return sparse_desc().encoding;
@@ -202,17 +192,11 @@ struct memory_desc_wrapper : public c_compatible {
 
     /** returns the size required to store described memory
      * note: if offset0 != 0 returns 0 (need to specify the behavior) */
-    size_t size(int index = 0) const {
+    size_t size() const {
         if (utils::one_of(format_kind(), format_kind::undef, format_kind::any)
                 || is_zero() || has_zero_dim())
             return 0;
 
-        if (utils::one_of(format_kind(), format_kind::blocked,
-                    format_kind::wino, format_kind::rnn_packed)
-                && index > 0) {
-            return 0;
-        }
-
         if (has_runtime_dims_or_strides()) return DNNL_RUNTIME_SIZE_VAL;
 
         if (format_kind() == format_kind::wino) {
@@ -220,48 +204,7 @@ struct memory_desc_wrapper : public c_compatible {
         } else if (format_kind() == format_kind::rnn_packed) {
             return rnn_packed_desc().size;
         } else if (is_sparse_desc()) {
-            if (utils::one_of(sparse_desc().encoding, sparse_encoding::csr,
-                        sparse_encoding::csc, sparse_encoding::bcsr,
-                        sparse_encoding::bcsc)) {
-
-                const size_t nnze = sparse_desc().nnze;
-
-                const auto idx_dt = metadata_types()[0];
-                const auto ptr_dt = metadata_types()[1];
-
-                // Return size for values.
-                if (index == 0) {
-                    switch (sparse_desc().encoding) {
-                        case sparse_encoding::csr:
-                        case sparse_encoding::csc:
-                            return nnze * data_type_size();
-                        case sparse_encoding::bcsr:
-                        case sparse_encoding::bcsc:
-                            return nnze * entry_dims()[0] * entry_dims()[1]
-                                    * data_type_size();
-                        default: assert(!"unknown sparse encoding"); return 0;
-                    }
-                }
-
-                // Return size for indices.
-                if (index == 1) return nnze * types::data_type_size(idx_dt);
-                // Return size for pointers.
-                if (index == 2) {
-                    switch (sparse_desc().encoding) {
-                        case sparse_encoding::csr:
-                        case sparse_encoding::bcsr:
-                            return (dims()[0] + 1)
-                                    * types::data_type_size(ptr_dt);
-                        case sparse_encoding::csc:
-                        case sparse_encoding::bcsc:
-                            return (dims()[1] + 1)
-                                    * types::data_type_size(ptr_dt);
-                        default: assert(!"unknown sparse encoding"); return 0;
-                    }
-                }
-                return 0;
-            } else if (sparse_desc().encoding == sparse_encoding::packed) {
-                if (index != 0) return 0;
+            if (sparse_desc().encoding == sparse_encoding::packed) {
                 // Only  2D tensors are supported at this point.
                 assert(ndims() == 2);
                 // Only OI16i64o4i is supported at this point.
diff --git a/src/common/primitive.hpp b/src/common/primitive.hpp
index caf1da34b56..3cfe44e8279 100644
--- a/src/common/primitive.hpp
+++ b/src/common/primitive.hpp
@@ -194,19 +194,15 @@ struct nested_scratchpad_t {
 
 #define CTX_IN_MEM(type, arg) \
     static_cast<const ARG_TYPE(type) *>(ctx.host_ptr(arg))
-#define CTX_IN_SPARSE_MEM(type, arg, idx) \
-    static_cast<const ARG_TYPE(type) *>(ctx.host_ptr(arg, idx))
 
 // Returns destination memory which may not have been zero pad initialized.
 #define CTX_OUT_MEM(type, arg) static_cast<ARG_TYPE(type) *>(ctx.host_ptr(arg))
-#define CTX_OUT_SPARSE_MEM(type, arg, idx) \
-    static_cast<ARG_TYPE(type) *>(ctx.host_ptr(arg, idx))
 
 // Returns destination memory which has been zero pad initialized. This macro
 // may result in a failure returned via the `status` input since zero pad
 // may fail.
 #define CTX_OUT_CLEAN_MEM(type, arg, status) \
-    static_cast<ARG_TYPE(type) *>(ctx.host_ptr(arg, 0, true, &status))
+    static_cast<ARG_TYPE(type) *>(ctx.host_ptr(arg, true, &status))
 
 #define CTX_IN_BATCH(arg) \
     ctx.input(arg) ? ctx.input(arg)->md()->ndims != 0 ? ctx.input(arg)->md()->dims[0] : 0 : 0
diff --git a/src/common/primitive_exec_types.cpp b/src/common/primitive_exec_types.cpp
index 9c5b1008af0..be61fe4b245 100644
--- a/src/common/primitive_exec_types.cpp
+++ b/src/common/primitive_exec_types.cpp
@@ -99,8 +99,7 @@ void exec_ctx_t::register_memory_mapping(void *handle, void *host_ptr) {
     memory_mapping_.insert({handle, host_ptr});
 }
 
-void *exec_ctx_t::host_ptr(
-        int arg, int index, bool do_zeropad, status_t *status_) const {
+void *exec_ctx_t::host_ptr(int arg, bool do_zeropad, status_t *status_) const {
     status_t status = status::success;
     if (status_) *status_ = status;
 
@@ -110,7 +109,7 @@ void *exec_ctx_t::host_ptr(
     if (do_zeropad) status = mem->zero_pad(*this);
     if (status_) *status_ = status;
 
-    auto *mem_storage = mem->memory_storage(index);
+    auto *mem_storage = mem->memory_storage();
     return host_ptr(mem_storage);
 }
 
diff --git a/src/common/primitive_exec_types.hpp b/src/common/primitive_exec_types.hpp
index 3cf128e2ed9..92b8fecdd74 100644
--- a/src/common/primitive_exec_types.hpp
+++ b/src/common/primitive_exec_types.hpp
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2018-2022 Intel Corporation
+* Copyright 2018-2021 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -83,8 +83,8 @@ struct exec_ctx_t {
 
     void register_memory_mapping(void *handle, void *host_ptr);
 
-    void *host_ptr(int arg, int index = 0, bool do_zeropad = false,
-            status_t *status = nullptr) const;
+    void *host_ptr(
+            int arg, bool do_zeropad = false, status_t *status = nullptr) const;
     void *host_ptr(const memory_storage_t *mem_storage) const;
 
     void *map_memory_storage(const memory_storage_t *storage, stream_t *stream,
diff --git a/src/common/primitive_hashing_utils.cpp b/src/common/primitive_hashing_utils.cpp
index 0e22b4439bf..cbf8c7b3278 100644
--- a/src/common/primitive_hashing_utils.cpp
+++ b/src/common/primitive_hashing_utils.cpp
@@ -81,22 +81,6 @@ size_t get_md_hash(const memory_desc_t &md) {
         case format_kind::sparse:
             seed = hash_combine(seed,
                     static_cast<size_t>(md.format_desc.sparse_desc.encoding));
-            seed = get_array_hash(seed, md.format_desc.sparse_desc.dims_order,
-                    DNNL_MAX_NDIMS);
-            seed = hash_combine(seed, md.format_desc.sparse_desc.nnze);
-            seed = get_array_hash(seed,
-                    md.format_desc.sparse_desc.metadata_types,
-                    DNNL_MAX_METADATA_TYPES);
-            seed = get_array_hash(
-                    seed, md.format_desc.sparse_desc.entry_dims, 2);
-            seed = hash_combine(
-                    seed, md.format_desc.sparse_desc.structure_ndims);
-            if (md.format_desc.sparse_desc.structure_ndims != 0) {
-                seed = get_array_hash(
-                        seed, md.format_desc.sparse_desc.structure_dims, 2);
-                seed = get_array_hash(
-                        seed, md.format_desc.sparse_desc.structure_nnz, 2);
-            }
             // User cannot initialize `packed_desc` therefore
             // at this point `packed_desc` is always zero initialized.
             break;
diff --git a/src/common/primitive_hashing_utils.hpp b/src/common/primitive_hashing_utils.hpp
index 89e8a92a134..361faee16f0 100644
--- a/src/common/primitive_hashing_utils.hpp
+++ b/src/common/primitive_hashing_utils.hpp
@@ -60,15 +60,6 @@ size_t get_vector_hash(size_t seed, const std::vector<T, A> &vec) {
     return get_array_hash(seed, vec.data(), vec.size());
 }
 
-template <>
-inline size_t get_array_hash<data_type_t>(
-        size_t seed, const data_type_t *v, int size) {
-    for (int i = 0; i < size; i++) {
-        seed = hash_combine(seed, static_cast<size_t>(v[i]));
-    }
-    return seed;
-}
-
 } // namespace primitive_hashing
 } // namespace impl
 } // namespace dnnl
diff --git a/src/common/type_helpers.hpp b/src/common/type_helpers.hpp
index 66ace3f0875..341c4d8666d 100644
--- a/src/common/type_helpers.hpp
+++ b/src/common/type_helpers.hpp
@@ -232,28 +232,7 @@ inline bool rnn_packed_desc_is_equal(
 
 inline bool sparse_desc_is_equal(
         const sparse_desc_t &lhs, const sparse_desc_t &rhs) {
-    bool ok = lhs.encoding == rhs.encoding && lhs.nnze == rhs.nnze
-            && lhs.structure_ndims == rhs.structure_ndims;
-    if (!ok) return false;
-
-    for (int i = 0; i < DNNL_MAX_NDIMS; i++)
-        ok = ok && lhs.dims_order[i] == rhs.dims_order[i];
-    if (!ok) return false;
-
-    for (int i = 0; i < DNNL_MAX_METADATA_TYPES; i++)
-        ok = ok && lhs.metadata_types[i] == rhs.metadata_types[i];
-    if (!ok) return false;
-
-    for (int i = 0; i < 2; i++)
-        ok = ok && lhs.entry_dims[i] == rhs.entry_dims[i];
-    if (!ok) return false;
-
-    if (lhs.structure_ndims != 0) {
-        for (int i = 0; i < 2; i++)
-            ok = ok && lhs.structure_dims[i] == rhs.structure_dims[i]
-                    && lhs.structure_nnz[i] == rhs.structure_nnz[i];
-    }
-    return ok;
+    return lhs.encoding == rhs.encoding;
 }
 
 inline memory_desc_t zero_md() {
diff --git a/src/common/utils.hpp b/src/common/utils.hpp
index 03b79cc6675..b31130dcc37 100644
--- a/src/common/utils.hpp
+++ b/src/common/utils.hpp
@@ -69,8 +69,8 @@ static_assert(sizeof(void *) == 8, "oneDNN supports 64-bit architectures only");
 
 #define CHECK(f) \
     do { \
-        dnnl::impl::status_t _status_ = f; \
-        if (_status_ != dnnl::impl::status::success) return _status_; \
+        status_t _status_ = f; \
+        if (_status_ != status::success) return _status_; \
     } while (0)
 
 #define IMPLICATION(cause, effect) (!(cause) || !!(effect))
diff --git a/src/cpu/reorder/simple_sparse_reorder.hpp b/src/cpu/reorder/simple_sparse_reorder.hpp
index fa5185f9cc4..3b38d642b46 100644
--- a/src/cpu/reorder/simple_sparse_reorder.hpp
+++ b/src/cpu/reorder/simple_sparse_reorder.hpp
@@ -112,7 +112,7 @@ struct simple_sparse_reorder_impl<SIMPLE_SPARSE_REORDER_TEMPL_CALL,
 
     static status_t execute(const cpu_reorder_pd_t *pd, const exec_ctx_t &ctx) {
         auto input = CTX_IN_MEM(const data_t<type_i> *, DNNL_ARG_FROM);
-        auto output = CTX_OUT_SPARSE_MEM(data_t<type_o> *, DNNL_ARG_TO, 0);
+        auto output = CTX_OUT_MEM(data_t<type_o> *, DNNL_ARG_TO);
 
         const auto input_d = ctx.memory_mdw(DNNL_ARG_FROM, pd->src_md());
         const auto output_d = ctx.memory_mdw(DNNL_ARG_TO, pd->dst_md());
diff --git a/src/cpu/x64/jit_brgemm_inner_product.cpp b/src/cpu/x64/jit_brgemm_inner_product.cpp
index 1d4be99f4b2..1dfcb34b770 100644
--- a/src/cpu/x64/jit_brgemm_inner_product.cpp
+++ b/src/cpu/x64/jit_brgemm_inner_product.cpp
@@ -54,7 +54,6 @@ void copy_data_chunk(ker_type &ker, char *tr_data, const char *data,
     ctx.last_row_blk = is_last_blk ? 1 : 0;
     (*ker)(&ctx);
 }
-
 } // namespace
 
 template <cpu_isa_t isa>
@@ -491,6 +490,7 @@ status_t brgemm_inner_product_fwd_t<isa>::execute_forward(
             }
         });
     }
+
     return status::success;
 }