Merge pull request rust-lang#274 from RalfJung/packed2
make force_allocation handle packed ByValPair
oli-obk authored Jul 31, 2017
2 parents f906c54 + 4458001 commit 7c6befe
Showing 9 changed files with 187 additions and 139 deletions.
120 changes: 75 additions & 45 deletions src/librustc_mir/interpret/eval_context.rs

Large diffs are not rendered by default.
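Note: the core force_allocation change lives in eval_context.rs, whose diff is not rendered above. Purely as an illustration (a hypothetical snippet, not a test from this PR), the situation named in the commit title looks roughly like this: a two-field value — which the interpreter can keep as a ByValPair — sitting inside a #[repr(packed)] struct, so spilling it to memory has to cope with an unaligned destination.

#[repr(packed)]
struct Packed {
    pad: u8,          // pushes `pair` to offset 1, misaligning it for (u64, u64)
    pair: (u64, u64), // a two-field value the interpreter can keep as ByValPair
}

fn main() {
    let p = Packed { pad: 0, pair: (1, 2) };
    // Copying the field out of the packed struct is the kind of operation that
    // can make the interpreter materialize a ByValPair at an unaligned address.
    let (a, b) = p.pair;
    assert_eq!(a + b, 3);
}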

14 changes: 7 additions & 7 deletions src/librustc_mir/interpret/lvalue.rs
@@ -196,7 +196,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
match lvalue {
Lvalue::Ptr { ptr, extra, aligned } => {
assert_eq!(extra, LvalueExtra::None);
Ok(Value::ByRef(ptr, aligned))
Ok(Value::ByRef { ptr, aligned })
}
Lvalue::Local { frame, local } => {
self.stack[frame].get_local(local)
@@ -305,7 +305,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0");
return Ok(base);
},
Value::ByRef(..) |
Value::ByRef{..} |
Value::ByValPair(..) |
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
},
@@ -315,7 +315,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0");
return Ok(base);
},
Value::ByRef(..) |
Value::ByRef{..} |
Value::ByValPair(..) |
Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
},
@@ -349,17 +349,17 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed })
}

pub(super) fn val_to_lvalue(&mut self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
Ok(match self.tcx.struct_tail(ty).sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = val.into_ptr_vtable_pair(&mut self.memory)?;
let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?;
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: true }
},
ty::TyStr | ty::TySlice(_) => {
let (ptr, len) = val.into_slice(&mut self.memory)?;
let (ptr, len) = val.into_slice(&self.memory)?;
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: true }
},
_ => Lvalue::Ptr { ptr: val.into_ptr(&mut self.memory)?, extra: LvalueExtra::None, aligned: true },
_ => Lvalue::Ptr { ptr: val.into_ptr(&self.memory)?, extra: LvalueExtra::None, aligned: true },
})
}

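The repeated Value::ByRef(..) → Value::ByRef { .. } edits in this file (and in the files below) track a change to the Value enum itself; its definition lives in value.rs, which is not part of the rendered diff. A simplified, self-contained sketch of the shape being matched — stand-in types, not the real ones:

#[derive(Clone, Copy)]
struct Pointer(u64);   // stand-in for the interpreter's Pointer type
#[derive(Clone, Copy)]
struct PrimVal(u128);  // stand-in for the interpreter's PrimVal type

enum Value {
    // Previously the tuple variant ByRef(Pointer, bool); the struct variant
    // names the `aligned` flag at every construction and match site.
    ByRef { ptr: Pointer, aligned: bool },
    ByVal(PrimVal),
    ByValPair(PrimVal, PrimVal),
}

fn describe(v: &Value) -> &'static str {
    match v {
        Value::ByRef { aligned: true, .. } => "in memory, aligned",
        Value::ByRef { aligned: false, .. } => "in memory, possibly unaligned",
        Value::ByVal(_) => "single immediate",
        Value::ByValPair(..) => "pair of immediates",
    }
}

fn main() {
    let v = Value::ByRef { ptr: Pointer(0x100), aligned: false };
    assert_eq!(describe(&v), "in memory, possibly unaligned");
}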
52 changes: 33 additions & 19 deletions src/librustc_mir/interpret/memory.rs
@@ -1,6 +1,7 @@
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
use std::{fmt, iter, ptr, mem, io, ops};
use std::cell::Cell;

use rustc::ty;
use rustc::ty::layout::{self, TargetDataLayout, HasDataLayout};
@@ -266,8 +267,8 @@ pub struct Memory<'a, 'tcx> {

/// To avoid having to pass flags to every single memory access, we have some global state saying whether
/// alignment checking is currently enforced for read and/or write accesses.
reads_are_aligned: bool,
writes_are_aligned: bool,
reads_are_aligned: Cell<bool>,
writes_are_aligned: Cell<bool>,

/// The current stack frame. Used to check accesses against locks.
cur_frame: usize,
@@ -287,8 +288,8 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
literal_alloc_cache: HashMap::new(),
thread_local: BTreeMap::new(),
next_thread_local: 0,
reads_are_aligned: true,
writes_are_aligned: true,
reads_are_aligned: Cell::new(true),
writes_are_aligned: Cell::new(true),
cur_frame: usize::max_value(),
}
}
@@ -796,7 +797,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
impl<'a, 'tcx> Memory<'a, 'tcx> {
fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
if self.reads_are_aligned {
if self.reads_are_aligned.get() {
self.check_align(ptr.into(), align)?;
}
if size == 0 {
@@ -813,7 +814,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {

fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
if self.writes_are_aligned {
if self.writes_are_aligned.get() {
self.check_align(ptr.into(), align)?;
}
if size == 0 {
@@ -909,10 +910,10 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> {
if size == 0 {
// Empty accesses don't need to be valid pointers, but they should still be aligned
if self.reads_are_aligned {
if self.reads_are_aligned.get() {
self.check_align(src, align)?;
}
if self.writes_are_aligned {
if self.writes_are_aligned.get() {
self.check_align(dest, align)?;
}
return Ok(());
@@ -968,7 +969,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
if size == 0 {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
if self.reads_are_aligned {
if self.reads_are_aligned.get() {
self.check_align(ptr, 1)?;
}
return Ok(&[]);
@@ -979,7 +980,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
if src.is_empty() {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
if self.writes_are_aligned {
if self.writes_are_aligned.get() {
self.check_align(ptr, 1)?;
}
return Ok(());
@@ -992,7 +993,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
if count == 0 {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
if self.writes_are_aligned {
if self.writes_are_aligned.get() {
self.check_align(ptr, 1)?;
}
return Ok(());
@@ -1399,23 +1400,36 @@ pub(crate) trait HasMemory<'a, 'tcx> {
fn memory(&self) -> &Memory<'a, 'tcx>;

// These are not supposed to be overriden.
fn read_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
fn read_maybe_aligned<F, T>(&self, aligned: bool, f: F) -> EvalResult<'tcx, T>
where F: FnOnce(&Self) -> EvalResult<'tcx, T>
{
let old = self.memory().reads_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().reads_are_aligned.set(old && aligned);
let t = f(self);
self.memory().reads_are_aligned.set(old);
t
}

fn read_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
{
assert!(self.memory_mut().reads_are_aligned, "Unaligned reads must not be nested");
self.memory_mut().reads_are_aligned = aligned;
let old = self.memory().reads_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().reads_are_aligned.set(old && aligned);
let t = f(self);
self.memory_mut().reads_are_aligned = true;
self.memory().reads_are_aligned.set(old);
t
}

fn write_maybe_aligned<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
fn write_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
where F: FnOnce(&mut Self) -> EvalResult<'tcx, T>
{
assert!(self.memory_mut().writes_are_aligned, "Unaligned writes must not be nested");
self.memory_mut().writes_are_aligned = aligned;
let old = self.memory().writes_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().writes_are_aligned.set(old && aligned);
let t = f(self);
self.memory_mut().writes_are_aligned = true;
self.memory().writes_are_aligned.set(old);
t
}
}
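The HasMemory methods above replace the old plain-bool flags (which asserted that unaligned accesses never nest) with Cell<bool>s that are saved, AND-combined with the current scope's requirement, and restored afterwards — so maybe-aligned scopes can now nest, and alignment is only checked when every enclosing scope asks for it. A minimal, self-contained sketch of that pattern, using simplified names rather than the real Memory type:

use std::cell::Cell;

struct Memory {
    reads_are_aligned: Cell<bool>,
}

// Mirrors the shape of read_maybe_aligned above: remember the old flag, AND in
// this scope's requirement, run the closure, then restore the old flag.
fn read_maybe_aligned<T>(mem: &Memory, aligned: bool, f: impl FnOnce(&Memory) -> T) -> T {
    let old = mem.reads_are_aligned.get();
    // Alignment is enforced only if *all* enclosing scopes require it.
    mem.reads_are_aligned.set(old && aligned);
    let t = f(mem);
    mem.reads_are_aligned.set(old);
    t
}

fn main() {
    let mem = Memory { reads_are_aligned: Cell::new(true) };
    read_maybe_aligned(&mem, false, |m| {
        // Nesting is now allowed: an inner scope asking for aligned access does
        // not re-enable checking while an unaligned outer scope is active.
        read_maybe_aligned(m, true, |m| assert!(!m.reads_are_aligned.get()));
    });
    // The flag is restored once the outermost scope ends.
    assert!(mem.reads_are_aligned.get());
}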
12 changes: 7 additions & 5 deletions src/librustc_mir/interpret/step.rs
@@ -11,12 +11,14 @@ use rustc::ty;
use rustc::ty::layout::Layout;
use rustc::ty::subst::Substs;

use syntax::codemap::Span;
use syntax::ast::Mutability;

use error::{EvalResult, EvalError};
use eval_context::{EvalContext, StackPopCleanup};
use eval_context::{EvalContext, StackPopCleanup, TyAndPacked};
use lvalue::{Global, GlobalId, Lvalue};
use value::{Value, PrimVal};
use syntax::codemap::Span;
use syntax::ast::Mutability;
use memory::HasMemory;

impl<'a, 'tcx> EvalContext<'a, 'tcx> {
pub fn inc_step_counter_and_check_limit(&mut self, n: u64) -> EvalResult<'tcx> {
@@ -101,12 +103,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

Layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
if variant_index as u64 != nndiscr {
let (offset, ty) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
let nonnull = self.force_allocation(dest)?.to_ptr()?.offset(offset.bytes(), &self)?;
trace!("struct wrapped nullable pointer type: {}", ty);
// only the pointer part of a fat pointer is used for this space optimization
let discr_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield");
self.memory.write_uint(nonnull, 0, discr_size)?;
self.write_maybe_aligned_mut(!packed, |ectx| ectx.memory.write_uint(nonnull, 0, discr_size))?;
}
},

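In the step.rs hunk above, nonnull_offset_and_ty now also reports whether the discriminant field sits behind a packed struct, and the discriminant write for the StructWrappedNullablePointer layout goes through write_maybe_aligned_mut(!packed, ..) so it may hit an unaligned location. A hypothetical source-level example of such a location (assumed to use that layout; not a test from this PR):

static WORD: u32 = 7;

#[repr(packed)]
struct Holder {
    tag: u8,
    // An Option of a pair containing a reference: None is encoded by a null
    // pointer inside the pair (the struct-wrapped nullable-pointer layout),
    // and the packed layout places the pair at offset 1.
    value: Option<(&'static u32, u8)>,
}

fn main() {
    let mut h = Holder { tag: 0, value: Some((&WORD, 1)) };
    // Writing None stores the "discriminant" (the null pointer inside the pair)
    // at a misaligned offset.
    h.value = None;
    let v = h.value; // copy the field out; never take a reference into `h`
    assert!(v.is_none());
}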
44 changes: 22 additions & 22 deletions src/librustc_mir/interpret/terminator/intrinsic.rs
@@ -45,7 +45,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

"arith_offset" => {
let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let ptr = arg_vals[0].into_ptr(&self.memory)?;
let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
self.write_ptr(dest, result_ptr, dest_ty)?;
}
@@ -61,7 +61,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"atomic_load_acq" |
"volatile_load" => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let ptr = arg_vals[0].into_ptr(&self.memory)?;
self.write_value(Value::by_ref(ptr), dest, ty)?;
}

@@ -70,7 +70,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"atomic_store_rel" |
"volatile_store" => {
let ty = substs.type_at(0);
let dest = arg_vals[0].into_ptr(&mut self.memory)?;
let dest = arg_vals[0].into_ptr(&self.memory)?;
self.write_value_to_ptr(arg_vals[1], dest, ty)?;
}

@@ -80,12 +80,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

_ if intrinsic_name.starts_with("atomic_xchg") => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let ptr = arg_vals[0].into_ptr(&self.memory)?;
let change = self.value_to_primval(arg_vals[1], ty)?;
let old = self.read_value(ptr, ty)?;
let old = match old {
Value::ByVal(val) => val,
Value::ByRef(..) => bug!("just read the value, can't be byref"),
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
Value::ByValPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
};
self.write_primval(dest, old, ty)?;
@@ -94,13 +94,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

_ if intrinsic_name.starts_with("atomic_cxchg") => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let ptr = arg_vals[0].into_ptr(&self.memory)?;
let expect_old = self.value_to_primval(arg_vals[1], ty)?;
let change = self.value_to_primval(arg_vals[2], ty)?;
let old = self.read_value(ptr, ty)?;
let old = match old {
Value::ByVal(val) => val,
Value::ByRef(..) => bug!("just read the value, can't be byref"),
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
};
let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
@@ -115,12 +115,12 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" |
"atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let ptr = arg_vals[0].into_ptr(&self.memory)?;
let change = self.value_to_primval(arg_vals[1], ty)?;
let old = self.read_value(ptr, ty)?;
let old = match old {
Value::ByVal(val) => val,
Value::ByRef(..) => bug!("just read the value, can't be byref"),
Value::ByRef { .. } => bug!("just read the value, can't be byref"),
Value::ByValPair(..) => bug!("atomic_xadd_relaxed doesn't work with nonprimitives"),
};
self.write_primval(dest, old, ty)?;
@@ -148,8 +148,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
// Also see the write_bytes intrinsic.
let elem_align = self.type_align(elem_ty)?;
let src = arg_vals[0].into_ptr(&mut self.memory)?;
let dest = arg_vals[1].into_ptr(&mut self.memory)?;
let src = arg_vals[0].into_ptr(&self.memory)?;
let dest = arg_vals[1].into_ptr(&self.memory)?;
self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?;
}
}
@@ -176,7 +176,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

"discriminant_value" => {
let ty = substs.type_at(0);
let adt_ptr = arg_vals[0].into_ptr(&mut self.memory)?.to_ptr()?;
let adt_ptr = arg_vals[0].into_ptr(&self.memory)?.to_ptr()?;
let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
}
@@ -251,10 +251,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let size = self.type_size(dest_ty)?.expect("cannot zero unsized value");
let init = |this: &mut Self, val: Value| {
let zero_val = match val {
Value::ByRef(ptr, aligned) => {
Value::ByRef { ptr, aligned } => {
// These writes have no alignment restriction anyway.
this.memory.write_repeat(ptr, 0, size)?;
Value::ByRef(ptr, aligned)
Value::ByRef { ptr, aligned }
},
// TODO(solson): Revisit this, it's fishy to check for Undef here.
Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) {
@@ -297,7 +297,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

"move_val_init" => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let ptr = arg_vals[0].into_ptr(&self.memory)?;
self.write_value_to_ptr(arg_vals[1], ptr, ty)?;
}

@@ -310,7 +310,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {

"offset" => {
let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let ptr = arg_vals[0].into_ptr(&self.memory)?;
let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
self.write_ptr(dest, result_ptr, dest_ty)?;
}
@@ -399,7 +399,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"transmute" => {
let src_ty = substs.type_at(0);
let ptr = self.force_allocation(dest)?.to_ptr()?;
self.write_maybe_aligned(/*aligned*/false, |ectx| {
self.write_maybe_aligned_mut(/*aligned*/false, |ectx| {
ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty)
})?;
}
@@ -442,9 +442,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let size = dest_layout.size(&self.tcx.data_layout).bytes();
let uninit = |this: &mut Self, val: Value| {
match val {
Value::ByRef(ptr, aligned) => {
Value::ByRef { ptr, aligned } => {
this.memory.mark_definedness(ptr, size, false)?;
Ok(Value::ByRef(ptr, aligned))
Ok(Value::ByRef { ptr, aligned })
},
_ => Ok(Value::ByVal(PrimVal::Undef)),
}
@@ -464,7 +464,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let ty_align = self.type_align(ty)?;
let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8;
let size = self.type_size(ty)?.expect("write_bytes() type must be sized");
let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
let ptr = arg_vals[0].into_ptr(&self.memory)?;
let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
if count > 0 {
// HashMap relies on write_bytes on a NULL ptr with count == 0 to work
@@ -550,15 +550,15 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok((size, align.abi()))
}
ty::TyDynamic(..) => {
let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?;
let (_, vtable) = value.into_ptr_vtable_pair(&self.memory)?;
// the second entry in the vtable is the dynamic size of the object.
self.read_size_and_align_from_vtable(vtable)
}

ty::TySlice(_) | ty::TyStr => {
let elem_ty = ty.sequence_element_type(self.tcx);
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized") as u64;
let (_, len) = value.into_slice(&mut self.memory)?;
let (_, len) = value.into_slice(&self.memory)?;
let align = self.type_align(elem_ty)?;
Ok((len * elem_size, align as u64))
}
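One plausible reading of the /*aligned*/ false flag on the transmute arm above (an assumption, not something stated in the diff): the destination is allocated with the layout of the target type, which can be less aligned than the source type whose bytes are stored into it, so the write must not be alignment-checked. A hypothetical example where the two alignments differ:

fn main() {
    // The destination has type [u8; 8] (alignment 1) while the value written is
    // a u64 (alignment 8); the sizes match, so the transmute is allowed, but the
    // store lands in an allocation that only guarantees byte alignment.
    let bytes: [u8; 8] = unsafe { std::mem::transmute(0x0102_0304_0506_0708u64) };
    assert_eq!(bytes.iter().map(|&b| u32::from(b)).sum::<u32>(), 36);
}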