Emit trunc nuw for unchecked shifts and to_immediate_scalar #137058

Merged · 3 commits · Feb 20, 2025
14 changes: 9 additions & 5 deletions compiler/rustc_codegen_gcc/src/builder.rs
@@ -989,10 +989,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
                 OperandValue::Ref(place.val)
             } else if place.layout.is_gcc_immediate() {
                 let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
-                if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr {
-                    scalar_load_metadata(self, load, scalar);
-                }
-                OperandValue::Immediate(self.to_immediate(load, place.layout))
+                OperandValue::Immediate(
+                    if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr {
+                        scalar_load_metadata(self, load, scalar);
+                        self.to_immediate_scalar(load, *scalar)
+                    } else {
+                        load
+                    },
+                )
             } else if let abi::BackendRepr::ScalarPair(ref a, ref b) = place.layout.backend_repr {
                 let b_offset = a.size(self).align_to(b.align(self).abi);

@@ -1694,7 +1698,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {

     fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
         if scalar.is_bool() {
-            return self.trunc(val, self.cx().type_i1());
+            return self.unchecked_utrunc(val, self.cx().type_i1());
         }
         val
     }
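
The `to_immediate_scalar` change above is the core of this PR: Rust stores `bool` as `i8` with the validity invariant that only 0 or 1 is ever written, so truncating the loaded `i8` to `i1` can never drop a set bit, and `trunc nuw` encodes exactly that promise for the optimizer. A minimal sketch of the effect (illustrative, not code from this PR; the exact IR depends on LLVM version and opt level):

    // Sketch: a `bool` round-trips through memory as an i8 that is 0 or 1.
    // Loading it back can therefore use `trunc nuw`, which is poison if any
    // dropped bit were set, a case the validity invariant of `bool` rules out.
    pub fn load_bool(b: &bool) -> bool {
        // Hypothetical IR after this PR (LLVM 19+):
        //   %v = load i8, ptr %b, !range !{i8 0, i8 2}
        //   %t = trunc nuw i8 %v to i1
        *b
    }
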
11 changes: 8 additions & 3 deletions compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -9,7 +9,7 @@ use gccjit::FunctionType;
 use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
 #[cfg(feature = "master")]
 use rustc_abi::ExternAbi;
-use rustc_abi::HasDataLayout;
+use rustc_abi::{BackendRepr, HasDataLayout};
 use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::common::IntPredicate;
@@ -181,14 +181,19 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
+                let layout = self.layout_of(tp_ty);
                 let load = if let PassMode::Cast { cast: ref ty, pad_i32: _ } = fn_abi.ret.mode {
                     let gcc_ty = ty.gcc_type(self);
                     self.volatile_load(gcc_ty, ptr)
                 } else {
-                    self.volatile_load(self.layout_of(tp_ty).gcc_type(self), ptr)
+                    self.volatile_load(layout.gcc_type(self), ptr)
                 };
                 // TODO(antoyo): set alignment.
-                self.to_immediate(load, self.layout_of(tp_ty))
+                if let BackendRepr::Scalar(scalar) = layout.backend_repr {
+                    self.to_immediate_scalar(load, scalar)
+                } else {
+                    load
+                }
             }
             sym::volatile_store => {
                 let dst = args[0].deref(self.cx());
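
The intrinsic path now mirrors the ordinary load path: a volatile load whose layout is a scalar is converted to immediate form through `to_immediate_scalar` (truncating bools to `i1`), while any other layout returns the raw load. A small sketch of code that exercises it, assuming `read_volatile` lowers to the `volatile_load` intrinsic:

    use std::ptr;

    // Sketch: a volatile load of a bool is now converted to immediate form
    // (i8 to i1, with `nuw`) exactly like a non-volatile load would be.
    pub unsafe fn read_flag(p: *const bool) -> bool {
        unsafe { ptr::read_volatile(p) }
    }
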
38 changes: 34 additions & 4 deletions compiler/rustc_codegen_llvm/src/builder.rs
@@ -29,13 +29,13 @@ use smallvec::SmallVec;
 use tracing::{debug, instrument};
 
 use crate::abi::FnAbiLlvmExt;
-use crate::attributes;
 use crate::common::Funclet;
 use crate::context::{CodegenCx, SimpleCx};
 use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, Metadata, True};
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
+use crate::{attributes, llvm_util};
 
 #[must_use]
 pub(crate) struct GenericBuilder<'a, 'll, CX: Borrow<SimpleCx<'ll>>> {
@@ -606,7 +606,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

     fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
         if scalar.is_bool() {
-            return self.trunc(val, self.cx().type_i1());
+            return self.unchecked_utrunc(val, self.cx().type_i1());
         }
         val
     }
@@ -746,10 +746,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 let load = self.load(llty, place.val.llval, place.val.align);
                 if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
+                    self.to_immediate_scalar(load, scalar)
+                } else {
+                    load
                 }
-                load
             });
-            OperandValue::Immediate(self.to_immediate(llval, place.layout))
+            OperandValue::Immediate(llval)
         } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);
@@ -942,6 +944,34 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
     }
 
+    fn unchecked_utrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        debug_assert_ne!(self.val_ty(val), dest_ty);
+
+        let trunc = self.trunc(val, dest_ty);
+        if llvm_util::get_version() >= (19, 0, 0) {
+            unsafe {
+                if llvm::LLVMIsAInstruction(trunc).is_some() {
+                    llvm::LLVMSetNUW(trunc, True);
+                }
+            }
+        }
+        trunc
+    }
+
+    fn unchecked_strunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        debug_assert_ne!(self.val_ty(val), dest_ty);
+
+        let trunc = self.trunc(val, dest_ty);
+        if llvm_util::get_version() >= (19, 0, 0) {
+            unsafe {
+                if llvm::LLVMIsAInstruction(trunc).is_some() {
+                    llvm::LLVMSetNSW(trunc, True);
+                }
+            }
+        }
+        trunc
+    }
+
     fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
     }
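
Two details in these new helpers are easy to miss: the `nuw`/`nsw` flags on `trunc` only exist since LLVM 19, so older toolchains silently fall back to a plain `trunc`, and the flag is set only when the result really is an instruction, because `trunc` of a constant folds to a constant expression that cannot carry the flag. The semantics the flags promise can be restated in plain Rust; this is an illustrative sketch, not PR code:

    // `trunc nuw` is poison exactly when the unsigned value does not fit
    // in the narrower type, i.e. when this returns None:
    fn utrunc_ok(x: u32) -> Option<u8> {
        (x <= u8::MAX as u32).then(|| x as u8)
    }

    // `trunc nsw` is poison exactly when the signed value does not fit:
    fn strunc_ok(x: i32) -> Option<i8> {
        (i8::MIN as i32..=i8::MAX as i32).contains(&x).then(|| x as i8)
    }
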
10 changes: 2 additions & 8 deletions compiler/rustc_codegen_ssa/src/base.rs
@@ -24,7 +24,7 @@ use rustc_middle::query::Providers;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
 use rustc_session::Session;
-use rustc_session::config::{self, CrateType, EntryFnType, OptLevel, OutputType};
+use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
 use rustc_span::{DUMMY_SP, Symbol, sym};
 use rustc_trait_selection::infer::{BoundRegionConversionTime, TyCtxtInferExt};
 use rustc_trait_selection::traits::{ObligationCause, ObligationCtxt};
@@ -364,13 +364,7 @@ pub(crate) fn build_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let rhs_sz = bx.cx().int_width(rhs_llty);
     let lhs_sz = bx.cx().int_width(lhs_llty);
     if lhs_sz < rhs_sz {
-        if is_unchecked && bx.sess().opts.optimize != OptLevel::No {
-            // FIXME: Use `trunc nuw` once that's available
-            let inrange = bx.icmp(IntPredicate::IntULE, rhs, mask);
-            bx.assume(inrange);
-        }
-
-        bx.trunc(rhs, lhs_llty)
+        if is_unchecked { bx.unchecked_utrunc(rhs, lhs_llty) } else { bx.trunc(rhs, lhs_llty) }
     } else if lhs_sz > rhs_sz {
         // We zero-extend even if the RHS is signed. So e.g. `(x: i32) << -1i8` will zero-extend the
         // RHS to `255i32`. But then we mask the shift amount to be within the size of the LHS
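
Previously the in-range fact was conveyed with an `icmp`/`assume` pair, and only when optimizing; `trunc nuw` states it directly on the truncation at every opt level. A sketch of the case this path handles, where the shift amount's type is wider than the shifted type (`unchecked_shl` was nightly-only under `feature(unchecked_shifts)` at the time of this PR):

    #![feature(unchecked_shifts)]

    // Sketch: `u8::unchecked_shl` takes a `u32` amount, so codegen narrows
    // the RHS from 32 bits to 8. The caller promises rhs < 8, hence every
    // bit dropped by the truncation is zero and `nuw` is justified.
    pub unsafe fn shl(x: u8, rhs: u32) -> u8 {
        // Before (optimized builds only):
        //   %ok = icmp ule i32 %rhs, 7
        //   call void @llvm.assume(i1 %ok)
        //   %r = trunc i32 %rhs to i8
        // After (any opt level, LLVM 19+):
        //   %r = trunc nuw i32 %rhs to i8
        unsafe { x.unchecked_shl(rhs) }
    }
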
10 changes: 5 additions & 5 deletions compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1040,7 +1040,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let (idx, _) = op.layout.non_1zst_field(bx).expect(
                     "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
                 );
-                op = op.extract_field(bx, idx);
+                op = op.extract_field(self, bx, idx);
             }
 
             // Now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
@@ -1072,7 +1072,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let (idx, _) = op.layout.non_1zst_field(bx).expect(
                         "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
                     );
-                    op = op.extract_field(bx, idx);
+                    op = op.extract_field(self, bx, idx);
                 }
 
                 // Make sure that we've actually unwrapped the rcvr down
@@ -1572,9 +1572,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if scalar.is_bool() {
                     bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                 }
+                // We store bools as `i8` so we need to truncate to `i1`.
+                llval = bx.to_immediate_scalar(llval, scalar);
             }
-            // We store bools as `i8` so we need to truncate to `i1`.
-            llval = bx.to_immediate(llval, arg.layout);
         }
     }

@@ -1604,7 +1604,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             } else {
                 // If the tuple is immediate, the elements are as well.
                 for i in 0..tuple.layout.fields.count() {
-                    let op = tuple.extract_field(bx, i);
+                    let op = tuple.extract_field(self, bx, i);
                     self.codegen_argument(bx, op, llargs, &args[i]);
                 }
             }
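
For argument passing, the effect of the block above is that the bool range metadata and the `i8` to `i1` conversion now both happen inside the scalar-only branch, via `to_immediate_scalar` rather than the layout-driven `to_immediate`; `extract_field` additionally takes `fx` so failures can point at the MIR span. A sketch of code that reaches this path (illustrative only):

    // Sketch: passing a `bool` by value. Codegen attaches !range {0, 2} to
    // the loaded i8 and truncates it (now with `nuw`) to i1 before the call.
    pub fn invoke(f: fn(bool) -> u8, b: bool) -> u8 {
        f(b)
    }
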
131 changes: 67 additions & 64 deletions compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -1,15 +1,14 @@
-use std::assert_matches::assert_matches;
 use std::fmt;
 
 use arrayvec::ArrayVec;
 use either::Either;
 use rustc_abi as abi;
 use rustc_abi::{Align, BackendRepr, Size};
-use rustc_middle::bug;
 use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
 use rustc_middle::mir::{self, ConstValue};
 use rustc_middle::ty::Ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::{bug, span_bug};
 use tracing::debug;
 
 use super::place::{PlaceRef, PlaceValue};
@@ -352,79 +351,83 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {

     pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
         bx: &mut Bx,
         i: usize,
     ) -> Self {
         let field = self.layout.field(bx.cx(), i);
         let offset = self.layout.fields.offset(i);
 
-        let mut val = match (self.val, self.layout.backend_repr) {
-            // If the field is ZST, it has no data.
-            _ if field.is_zst() => OperandValue::ZeroSized,
-
-            // Newtype of a scalar, scalar pair or vector.
-            (OperandValue::Immediate(_) | OperandValue::Pair(..), _)
-                if field.size == self.layout.size =>
-            {
-                assert_eq!(offset.bytes(), 0);
-                self.val
-            }
-
-            // Extract a scalar component from a pair.
-            (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
-                if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.size(bx.cx()));
-                    OperandValue::Immediate(a_llval)
-                } else {
-                    assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
-                    assert_eq!(field.size, b.size(bx.cx()));
-                    OperandValue::Immediate(b_llval)
-                }
-            }
-
-            // `#[repr(simd)]` types are also immediate.
-            (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => {
-                OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
-            }
-
-            _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
-        };
-
-        match (&mut val, field.backend_repr) {
-            (OperandValue::ZeroSized, _) => {}
-            (
-                OperandValue::Immediate(llval),
-                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. },
-            ) => {
-                // Bools in union fields needs to be truncated.
-                *llval = bx.to_immediate(*llval, field);
-            }
-            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => {
-                // Bools in union fields needs to be truncated.
-                *a = bx.to_immediate_scalar(*a, a_abi);
-                *b = bx.to_immediate_scalar(*b, b_abi);
-            }
-            // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
-            (OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => {
-                assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. });
-
-                let llfield_ty = bx.cx().backend_type(field);
-
-                // Can't bitcast an aggregate, so round trip through memory.
-                let llptr = bx.alloca(field.size, field.align.abi);
-                bx.store(*llval, llptr, field.align.abi);
-                *llval = bx.load(llfield_ty, llptr, field.align.abi);
-            }
-            (
-                OperandValue::Immediate(_),
-                BackendRepr::Uninhabited | BackendRepr::Memory { sized: false },
-            ) => {
-                bug!()
-            }
-            (OperandValue::Pair(..), _) => bug!(),
-            (OperandValue::Ref(..), _) => bug!(),
-        }
-
+        let val = if field.is_zst() {
+            OperandValue::ZeroSized
+        } else if field.size == self.layout.size {
+            assert_eq!(offset.bytes(), 0);
+            if let Some(field_val) = fx.codegen_transmute_operand(bx, *self, field) {
+                field_val
+            } else {
+                // we have to go through memory for things like
+                // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
+                let place = PlaceRef::alloca(bx, field);
+                self.val.store(bx, place.val.with_type(self.layout));
+                bx.load_operand(place).val
+            }
+        } else {
+            let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
+                // Extract a scalar component from a pair.
+                (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
+                    if offset.bytes() == 0 {
+                        assert_eq!(field.size, a.size(bx.cx()));
+                        (Some(a), a_llval)
+                    } else {
+                        assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
+                        assert_eq!(field.size, b.size(bx.cx()));
+                        (Some(b), b_llval)
+                    }
+                }
+
+                // `#[repr(simd)]` types are also immediate.
+                (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => {
+                    (None, bx.extract_element(llval, bx.cx().const_usize(i as u64)))
+                }
+
+                _ => {
+                    span_bug!(fx.mir.span, "OperandRef::extract_field({:?}): not applicable", self)
+                }
+            };
+            OperandValue::Immediate(match field.backend_repr {
+                BackendRepr::Vector { .. } => imm,
+                BackendRepr::Scalar(out_scalar) => {
+                    let Some(in_scalar) = in_scalar else {
+                        span_bug!(
+                            fx.mir.span,
+                            "OperandRef::extract_field({:?}): missing input scalar for output scalar",
+                            self
+                        )
+                    };
+                    if in_scalar != out_scalar {
+                        // If the backend and backend_immediate types might differ,
+                        // flip back to the backend type then to the new immediate.
+                        // This avoids nop truncations, but still handles things like
+                        // Bools in union fields needs to be truncated.
+                        let backend = bx.from_immediate(imm);
+                        bx.to_immediate_scalar(backend, out_scalar)
+                    } else {
+                        imm
+                    }
+                }
+                BackendRepr::Memory { sized: true } => {
+                    span_bug!(
+                        fx.mir.span,
+                        "Projecting into a simd type with padding doesn't work; \
+                        See <https://github.com/rust-lang/rust/issues/137108>",
+                    );
+                }
+                BackendRepr::Uninhabited
+                | BackendRepr::ScalarPair(_, _)
+                | BackendRepr::Memory { sized: false } => bug!(),
+            })
+        };
+
         OperandRef { val, layout: field }
     }
 }
@@ -587,7 +590,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
"Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
but tried to access field {f:?} of pointer {o:?}",
);
o = o.extract_field(bx, f.index());
o = o.extract_field(self, bx, f.index());
}
mir::ProjectionElem::Index(_)
| mir::ProjectionElem::ConstantIndex { .. } => {
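
The rewritten `extract_field` folds the old two-phase logic into one pass: a field that covers the whole operand is handed to `codegen_transmute_operand`, with a stack round-trip as the fallback, and a scalar extracted from a pair is re-truncated only when its scalar actually differs from the source component's (e.g. reading a `bool` field of a union over a `u8`). The fallback case named in the comment can be reproduced with a nightly-only `#[repr(simd)]` newtype; a sketch:

    #![feature(repr_simd)]

    // Sketch: `S` is a backend vector, but its field is an in-memory array,
    // so extracting `.0` stores the vector to an alloca and reloads it at
    // array type (the `load_operand(place)` fallback above).
    #[repr(simd)]
    pub struct S([i32; 4]);

    pub fn unwrap(s: S) -> [i32; 4] {
        s.0
    }
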
4 changes: 3 additions & 1 deletion compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -231,7 +231,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ///
     /// Returns `None` for cases that can't work in that framework, such as for
     /// `Immediate`->`Ref` that needs an `alloc` to get the location.
-    fn codegen_transmute_operand(
+    pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
         operand: OperandRef<'tcx, Bx::Value>,
@@ -260,6 +260,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Ref(source_place_val) => {
                 assert_eq!(source_place_val.llextra, None);
                 assert_matches!(operand_kind, OperandValueKind::Ref);
+                // The existing alignment is part of `source_place_val`,
+                // so that alignment will be used, not `cast`'s.
                 Some(bx.load_operand(source_place_val.with_type(cast)).val)
             }
             OperandValue::ZeroSized => {
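
The new comment records an alignment subtlety: when a transmute reads back through a `Ref` operand, the load keeps the source place's alignment instead of assuming the destination type's ABI alignment. A sketch of a case where that matters (whether this particular operand is represented as a `Ref` is backend-dependent, so treat it as an assumption):

    // Sketch: the source bytes may be only 1-aligned while `u32` wants 4,
    // so the reload in the `Ref` arm must use the alignment carried by
    // `source_place_val`, not the alignment implied by the cast-to type.
    pub fn u32_from_bytes(b: [u8; 4]) -> u32 {
        unsafe { std::mem::transmute(b) }
    }
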