PR #21960: Updated nanobind commit
Imported from GitHub PR openxla/xla#21960

Point nanobind to the commit fixing concurrent Python/C++ object access: wjakob/nanobind#867

cc @hawkinsp
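For illustration only (not from this PR): a minimal nanobind extension of the kind the fix concerns, a bound C++ object that free-threaded Python code may call into from several threads at once. The module and class names below are hypothetical.

// Illustrative sketch only; module and class names are made up for this note.
// Under free-threaded CPython, several Python threads may call into the same
// bound object at once; the pinned nanobind commit hardens nanobind's own
// internal Python/C++ object accesses for that situation.
#include <nanobind/nanobind.h>

namespace nb = nanobind;

struct Counter {
  long value = 0;
  // User code must still synchronize its own shared state as needed.
  void increment() { ++value; }
};

NB_MODULE(concurrency_example, m) {
  nb::class_<Counter>(m, "Counter")
      .def(nb::init<>())
      .def("increment", &Counter::increment)
      .def_rw("value", &Counter::value);
}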
Copybara import of the project:

--
77e693fb39e0b737016770585c3f8786eb141474 by vfdev-5 <vfdev.5@gmail.com>:

Updated nanobind commit

Merging this change closes #21960

FUTURE_COPYBARA_INTEGRATE_REVIEW=openxla/xla#21960 from vfdev-5:update-nanobind 77e693fb39e0b737016770585c3f8786eb141474
PiperOrigin-RevId: 720567148
vfdev-5 authored and tensorflower-gardener committed Jan 29, 2025
1 parent e333034 commit 75cde9b
Showing 18 changed files with 460 additions and 268 deletions.
2 changes: 2 additions & 0 deletions tensorflow/compiler/tf2xla/BUILD
@@ -223,6 +223,7 @@ filegroup(
"@local_xla//xla/backends/cpu:xla_cpu_runtime_hdrs",
"@local_xla//xla/service:custom_call_status_hdrs",
"@local_xla//xla/service/cpu:runtime_hdrs",
"@local_xla//xla/tsl/concurrency:xla_cpu_runtime_hdrs",
"@local_xla//xla/tsl/framework:xla_cpu_runtime_hdrs",
"@local_xla//xla/tsl/framework/fixedpoint:xla_cpu_runtime_hdrs",
],
@@ -242,6 +243,7 @@ filegroup(
"@local_xla//xla:cpu_runtime_srcs",
"@local_xla//xla/service:custom_call_status_srcs",
"@local_xla//xla/service/cpu:runtime_srcs",
"@local_xla//xla/tsl/concurrency:xla_cpu_runtime_srcs",
],
visibility = [
"//tensorflow/tools/pip_package:__pkg__",
6 changes: 3 additions & 3 deletions third_party/nanobind/workspace.bzl
@@ -5,8 +5,8 @@ load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
tf_http_archive(
name = "nanobind",
strip_prefix = "nanobind-cee104db8606797a63752d2904b2f2795014a125",
sha256 = "d5dec3690c0a11b1ca48021ff34238886da7938b7bbbd5c0e946dcef6e6b7e25",
urls = tf_mirror_urls("https://github.com/wjakob/nanobind/archive/cee104db8606797a63752d2904b2f2795014a125.tar.gz"),
strip_prefix = "nanobind-d79309197caaad83cda05df533136865d294f01e",
sha256 = "598b116f36dbdf9738bb269cc1551ae073715fb3d69f07ca0dd01e6de0ddf4b0",
urls = tf_mirror_urls("https://github.com/wjakob/nanobind/archive/d79309197caaad83cda05df533136865d294f01e.tar.gz"),
build_file = "//third_party/nanobind:nanobind.BUILD",
)
6 changes: 3 additions & 3 deletions third_party/xla/third_party/nanobind/workspace.bzl
@@ -5,8 +5,8 @@ load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
tf_http_archive(
name = "nanobind",
strip_prefix = "nanobind-cee104db8606797a63752d2904b2f2795014a125",
sha256 = "d5dec3690c0a11b1ca48021ff34238886da7938b7bbbd5c0e946dcef6e6b7e25",
urls = tf_mirror_urls("https://github.com/wjakob/nanobind/archive/cee104db8606797a63752d2904b2f2795014a125.tar.gz"),
strip_prefix = "nanobind-d79309197caaad83cda05df533136865d294f01e",
sha256 = "598b116f36dbdf9738bb269cc1551ae073715fb3d69f07ca0dd01e6de0ddf4b0",
urls = tf_mirror_urls("https://github.com/wjakob/nanobind/archive/d79309197caaad83cda05df533136865d294f01e.tar.gz"),
build_file = "//third_party/nanobind:nanobind.BUILD",
)
20 changes: 19 additions & 1 deletion third_party/xla/xla/backends/cpu/runtime/BUILD
@@ -371,6 +371,21 @@ xla_cc_test(
],
)

cc_library(
name = "convolution_lib",
srcs = ["convolution_lib.cc"],
hdrs = ["convolution_lib.h"],
deps = [
"//xla:shape_util",
"//xla/runtime:buffer_use",
"//xla/service:buffer_assignment",
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:span",
],
)

cc_library(
name = "convolution_thunk_internal",
srcs = [
@@ -382,10 +397,11 @@ cc_library(
visibility = internal_visibility([":friends"]),
deps = [
":concurrency",
"//xla/tsl/concurrency:async_value",
"//xla/tsl/framework/contraction:eigen_contraction_kernel",
"//xla/tsl/framework/convolution:eigen_helpers",
"//xla/tsl/platform:logging",
"@eigen_archive//:eigen3",
"@local_tsl//tsl/platform:logging",
],
)

@@ -395,6 +411,7 @@ cc_library(
hdrs = ["convolution_thunk.h"],
copts = runtime_copts(),
deps = [
":convolution_lib",
":convolution_thunk_internal",
":thunk",
"//xla:executable_run_options",
@@ -1146,6 +1163,7 @@ cc_library(
":collective_permute_thunk",
":collective_thunk",
":conditional_thunk",
":convolution_lib",
":convolution_thunk",
":copy_thunk",
":custom_call_thunk",
36 changes: 36 additions & 0 deletions third_party/xla/xla/backends/cpu/runtime/convolution_lib.cc
@@ -0,0 +1,36 @@
/* Copyright 2025 The OpenXLA Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "xla/backends/cpu/runtime/convolution_lib.h"

#include <cstdint>

#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "xla/runtime/buffer_use.h"

namespace xla::cpu {

absl::InlinedVector<BufferUse, 4> ConvolutionBufferUses(
const ConvolutionSlices& slices) {
return {BufferUse::Read(slices.input_buffer),
BufferUse::Read(slices.kernel_buffer),
BufferUse::Write(slices.output_buffer)};
}

ConvolutionCanonicalDims::Dims::Dims(absl::Span<const int64_t> dims)
: rank(dims.size()), x(dims[0]), y(dims[1]), z(rank == 3 ? dims[2] : 0) {}

} // namespace xla::cpu
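A brief usage sketch (not part of this commit) of the Dims constructor above; the function name and dimension values are made up for illustration.

// Sketch only: Dims collapses a 2- or 3-element spatial-dimension span into
// named x/y/z fields (z defaults to 0 for rank 2).
#include <cstdint>

#include "absl/types/span.h"
#include "xla/backends/cpu/runtime/convolution_lib.h"

void DimsExample() {
  const int64_t spatial_2d[] = {28, 28};
  xla::cpu::ConvolutionCanonicalDims::Dims d2(absl::MakeConstSpan(spatial_2d));
  // d2.rank == 2, d2.x == 28, d2.y == 28, d2.z == 0

  const int64_t spatial_3d[] = {16, 16, 8};
  xla::cpu::ConvolutionCanonicalDims::Dims d3(absl::MakeConstSpan(spatial_3d));
  // d3.rank == 3, d3.x == 16, d3.y == 16, d3.z == 8
}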
120 changes: 120 additions & 0 deletions third_party/xla/xla/backends/cpu/runtime/convolution_lib.h
@@ -0,0 +1,120 @@
/* Copyright 2025 The OpenXLA Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef XLA_BACKENDS_CPU_RUNTIME_CONVOLUTION_LIB_H_
#define XLA_BACKENDS_CPU_RUNTIME_CONVOLUTION_LIB_H_

#include <cstddef>
#include <cstdint>

#include "absl/container/inlined_vector.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/shape.h"

namespace xla::cpu {

// Allocation slices of the convolution operation.
struct ConvolutionSlices {
BufferAllocation::Slice input_buffer;
Shape input_shape;

BufferAllocation::Slice kernel_buffer;
Shape kernel_shape;

BufferAllocation::Slice output_buffer;
Shape output_shape;
};

// Returns buffer uses of the convolution operation.
absl::InlinedVector<BufferUse, 4> ConvolutionBufferUses(
const ConvolutionSlices& slices);

// Convolution dimensions in canonical form inferred from the operand shapes
// and convolution parameters.
struct ConvolutionCanonicalDims {
// A helper struct to store the x, y and z dimensions of a tensor, introduced
// for readability. In case of 2D convolution, only the x and y dimensions are
// used and z is set to 0.
struct Dims {
explicit Dims(absl::Span<const int64_t> dims);

template <typename Sink>
friend void AbslStringify(Sink& sink, const Dims& d);

int64_t rank;
int64_t x;
int64_t y;
int64_t z;
};

template <typename Sink>
friend void AbslStringify(Sink& sink, const ConvolutionCanonicalDims& d);

size_t convolution_rank() const { return input_dims.rank; }

int64_t input_batch;
Dims input_dims;
int64_t input_channels;

Dims kernel_dims;
int64_t kernel_channels;
int64_t kernel_filters;

Dims output_dims;

Dims strides;
Dims padding_before;
Dims padding_after;
Dims base_dilation;
Dims window_dilation;

int64_t feature_group_count;
};

template <typename Sink>
void AbslStringify(Sink& sink, const ConvolutionCanonicalDims::Dims& d) {
switch (d.rank) {
case 2:
absl::Format(&sink, "[%d,%d]", d.x, d.y);
break;
case 3:
absl::Format(&sink, "[%d,%d,%d]", d.x, d.y, d.z);
break;
default:
absl::Format(&sink, "[invalid rank %d]", d.rank);
}
}

template <typename Sink>
void AbslStringify(Sink& sink, const ConvolutionCanonicalDims& d) {
absl::Format(&sink,
"convolution_rank=%d input_batch=%d input_dims=%v "
"input_channels=%d kernel_dims=%v kernel_channels=%d "
"kernel_filters=%d output_dims=%v strides=%v padding_before=%v "
"padding_after=%v base_dilation=%v window_dilation=%v "
"feature_group_count=%d",
d.convolution_rank(), d.input_batch, d.input_dims,
d.input_channels, d.kernel_dims, d.kernel_channels,
d.kernel_filters, d.output_dims, d.strides, d.padding_before,
d.padding_after, d.base_dilation, d.window_dilation,
d.feature_group_count);
}

} // namespace xla::cpu

#endif // XLA_BACKENDS_CPU_RUNTIME_CONVOLUTION_LIB_H_
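A brief usage sketch (not part of this commit): since Dims and ConvolutionCanonicalDims define AbslStringify, they can be formatted with absl::StrFormat's %v specifier or absl::StrCat; the function name and values below are made up for illustration.

// Sketch only: formatting Dims via the AbslStringify extension point.
#include <cstdint>
#include <string>

#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/backends/cpu/runtime/convolution_lib.h"

std::string FormatKernelDims() {
  const int64_t spatial[] = {3, 3};
  xla::cpu::ConvolutionCanonicalDims::Dims kernel_dims(
      absl::MakeConstSpan(spatial));
  // Takes the rank-2 branch of AbslStringify and yields "kernel_dims=[3,3]".
  return absl::StrFormat("kernel_dims=%v", kernel_dims);
}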
