From eea4f0c24893d3b5bffec067e6051eb0b5106748 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Wed, 25 May 2016 05:40:57 +0100
Subject: [PATCH 1/5] Update libc to bring in pthread mutex type definitions

---
 src/liblibc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/liblibc b/src/liblibc
index b19b5465a1235..45d85899e99d3 160000
--- a/src/liblibc
+++ b/src/liblibc
@@ -1 +1 @@
-Subproject commit b19b5465a1235be3323363cdc11838739b593029
+Subproject commit 45d85899e99d33e291b2bf3259881b46cc5365d7

From d73f5e65ecbcb6a0acb908b54226edfccf47eccc Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Wed, 25 May 2016 05:44:28 +0100
Subject: [PATCH 2/5] Fix undefined behavior when re-locking a mutex from the
 same thread

This only applies to pthread mutexes. We solve this by creating the
mutex with the PTHREAD_MUTEX_NORMAL type, which guarantees that
re-locking from the same thread will deadlock.

---
 src/libstd/sync/mutex.rs        |  6 +++++-
 src/libstd/sys/common/mutex.rs  |  6 ++++++
 src/libstd/sys/unix/mutex.rs    | 33 +++++++++++++++++++++++++++++++++
 src/libstd/sys/windows/mutex.rs |  2 ++
 4 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/src/libstd/sync/mutex.rs b/src/libstd/sync/mutex.rs
index 90cc79dad66bb..e53a97eccdfca 100644
--- a/src/libstd/sync/mutex.rs
+++ b/src/libstd/sync/mutex.rs
@@ -190,10 +190,14 @@ impl<T> Mutex<T> {
     /// Creates a new mutex in an unlocked state ready for use.
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn new(t: T) -> Mutex<T> {
-        Mutex {
+        let mut m = Mutex {
             inner: box StaticMutex::new(),
             data: UnsafeCell::new(t),
+        };
+        unsafe {
+            m.inner.lock.init();
         }
+        m
     }
 }
diff --git a/src/libstd/sys/common/mutex.rs b/src/libstd/sys/common/mutex.rs
index 5a6dfe7fb1a15..7a2183c522f5b 100644
--- a/src/libstd/sys/common/mutex.rs
+++ b/src/libstd/sys/common/mutex.rs
@@ -27,6 +27,12 @@ impl Mutex {
     /// first used with any of the functions below.
     pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) }

+    /// Prepare the mutex for use.
+    ///
+    /// This should be called once the mutex is at a stable memory address.
+    #[inline]
+    pub unsafe fn init(&mut self) { self.0.init() }
+
     /// Locks the mutex blocking the current thread until it is available.
     ///
     /// Behavior is undefined if the mutex has been moved between this and any
diff --git a/src/libstd/sys/unix/mutex.rs b/src/libstd/sys/unix/mutex.rs
index 4e4abcfbeee4d..52cf3f97c5c83 100644
--- a/src/libstd/sys/unix/mutex.rs
+++ b/src/libstd/sys/unix/mutex.rs
@@ -30,6 +30,39 @@ impl Mutex {
         Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
     }
     #[inline]
+    pub unsafe fn init(&mut self) {
+        // Issue #33770
+        //
+        // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
+        // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
+        // try to re-lock it from the same thread when you already hold a lock.
+        //
+        // In practice, glibc takes advantage of this undefined behavior to
+        // implement hardware lock elision, which uses hardware transactional
+        // memory to avoid acquiring the lock. While a transaction is in
+        // progress, the lock appears to be unlocked. This isn't a problem for
+        // other threads since the transactional memory will abort if a
+        // conflict is detected; however, no abort is generated when re-locking
+        // from the same thread.
+        //
+        // Since locking the same mutex twice will result in two aliasing &mut
+        // references, we instead create the mutex with type
+        // PTHREAD_MUTEX_NORMAL, which is guaranteed to deadlock if we try to
+        // re-lock it from the same thread, thus avoiding undefined behavior.
+        //
+        // We can't do anything for StaticMutex, but that type is deprecated
+        // anyway.
+        let mut attr: libc::pthread_mutexattr_t = mem::uninitialized();
+        let r = libc::pthread_mutexattr_init(&mut attr);
+        debug_assert_eq!(r, 0);
+        let r = libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL);
+        debug_assert_eq!(r, 0);
+        let r = libc::pthread_mutex_init(self.inner.get(), &attr);
+        debug_assert_eq!(r, 0);
+        let r = libc::pthread_mutexattr_destroy(&mut attr);
+        debug_assert_eq!(r, 0);
+    }
+    #[inline]
     pub unsafe fn lock(&self) {
         let r = libc::pthread_mutex_lock(self.inner.get());
         debug_assert_eq!(r, 0);
diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs
index b770156582d3b..8762b34e3da48 100644
--- a/src/libstd/sys/windows/mutex.rs
+++ b/src/libstd/sys/windows/mutex.rs
@@ -64,6 +64,8 @@ impl Mutex {
             held: UnsafeCell::new(false),
         }
     }
+    #[inline]
+    pub unsafe fn init(&mut self) {}
     pub unsafe fn lock(&self) {
         match kind() {
             Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
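For readers unfamiliar with the pthread attribute sequence in init() above, here
is a minimal standalone sketch of the same initialization against the `libc`
crate. It assumes a Unix target; the `mem::zeroed` storage and the
`pthread_mutex_trylock`/`EBUSY` probe at the end are illustrative additions (not
part of the patch) so the program can demonstrate the "already locked" state
and still terminate instead of deadlocking:

    // Requires the `libc` crate (e.g. libc = "0.2") in Cargo.toml.
    use std::mem;

    fn main() {
        unsafe {
            // Build a mutex attribute object and request PTHREAD_MUTEX_NORMAL,
            // whose re-lock behavior is a guaranteed deadlock rather than UB.
            let mut attr: libc::pthread_mutexattr_t = mem::zeroed();
            assert_eq!(libc::pthread_mutexattr_init(&mut attr), 0);
            assert_eq!(
                libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL),
                0
            );

            let mut m: libc::pthread_mutex_t = mem::zeroed();
            assert_eq!(libc::pthread_mutex_init(&mut m, &attr), 0);
            // The attribute object can be destroyed once the mutex exists.
            assert_eq!(libc::pthread_mutexattr_destroy(&mut attr), 0);

            assert_eq!(libc::pthread_mutex_lock(&mut m), 0);
            // trylock reports EBUSY on an already-locked NORMAL mutex, which
            // shows the held state without actually deadlocking this sketch.
            assert_eq!(libc::pthread_mutex_trylock(&mut m), libc::EBUSY);
            assert_eq!(libc::pthread_mutex_unlock(&mut m), 0);
            assert_eq!(libc::pthread_mutex_destroy(&mut m), 0);
        }
    }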
From 960d1b74c5b8db59b871af50d948168f64b1be8a Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Wed, 25 May 2016 06:07:54 +0100
Subject: [PATCH 3/5] Don't allow pthread_rwlock_t to recursively lock itself

This is allowed by POSIX and can happen on glibc with processors that
support hardware lock elision.

---
 src/libstd/sys/unix/rwlock.rs | 61 +++++++++++++++++++++++++++++------
 1 file changed, 52 insertions(+), 9 deletions(-)

diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs
index 44bd5d895f2e4..72ab70aeac482 100644
--- a/src/libstd/sys/unix/rwlock.rs
+++ b/src/libstd/sys/unix/rwlock.rs
@@ -11,14 +11,20 @@
 use libc;
 use cell::UnsafeCell;

-pub struct RWLock { inner: UnsafeCell<libc::pthread_rwlock_t> }
+pub struct RWLock {
+    inner: UnsafeCell<libc::pthread_rwlock_t>,
+    write_locked: UnsafeCell<bool>,
+}

 unsafe impl Send for RWLock {}
 unsafe impl Sync for RWLock {}

 impl RWLock {
     pub const fn new() -> RWLock {
-        RWLock { inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER) }
+        RWLock {
+            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+            write_locked: UnsafeCell::new(false),
+        }
     }
     #[inline]
     pub unsafe fn read(&self) {
@@ -35,7 +41,16 @@ impl RWLock {
         //
         // We roughly maintain the deadlocking behavior by panicking to ensure
         // that this lock acquisition does not succeed.
-        if r == libc::EDEADLK {
+        //
+        // We also check whether this lock is already write locked. This is
+        // only possible if it was write locked by the current thread and the
+        // implementation allows recursive locking. The POSIX standard doesn't
+        // require recursively locking an rwlock to deadlock, but we can't
+        // allow that because it could lead to aliasing issues.
+        if r == libc::EDEADLK || *self.write_locked.get() {
+            if r == 0 {
+                self.raw_unlock();
+            }
             panic!("rwlock read lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
@@ -43,29 +58,57 @@ impl RWLock {
     }
     #[inline]
     pub unsafe fn try_read(&self) -> bool {
-        libc::pthread_rwlock_tryrdlock(self.inner.get()) == 0
+        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+        if r == 0 && *self.write_locked.get() {
+            self.raw_unlock();
+            false
+        } else {
+            r == 0
+        }
     }
     #[inline]
     pub unsafe fn write(&self) {
         let r = libc::pthread_rwlock_wrlock(self.inner.get());
-        // see comments above for why we check for EDEADLK
-        if r == libc::EDEADLK {
+        // see comments above for why we check for EDEADLK and write_locked
+        if r == libc::EDEADLK || *self.write_locked.get() {
+            if r == 0 {
+                self.raw_unlock();
+            }
             panic!("rwlock write lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
         }
+        *self.write_locked.get() = true;
     }
     #[inline]
     pub unsafe fn try_write(&self) -> bool {
-        libc::pthread_rwlock_trywrlock(self.inner.get()) == 0
+        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+        if r == 0 && *self.write_locked.get() {
+            self.raw_unlock();
+            false
+        } else if r == 0 {
+            *self.write_locked.get() = true;
+            true
+        } else {
+            false
+        }
     }
     #[inline]
-    pub unsafe fn read_unlock(&self) {
+    unsafe fn raw_unlock(&self) {
         let r = libc::pthread_rwlock_unlock(self.inner.get());
         debug_assert_eq!(r, 0);
     }
     #[inline]
-    pub unsafe fn write_unlock(&self) { self.read_unlock() }
+    pub unsafe fn read_unlock(&self) {
+        debug_assert!(!*self.write_locked.get());
+        self.raw_unlock();
+    }
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        debug_assert!(*self.write_locked.get());
+        *self.write_locked.get() = false;
+        self.raw_unlock();
+    }
     #[inline]
     pub unsafe fn destroy(&self) {
         let r = libc::pthread_rwlock_destroy(self.inner.get());
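The user-visible effect of this patch on std's RwLock, as a minimal sketch
using nothing beyond plain `std`: a recursive read attempt from the thread
holding the write lock should now be refused rather than handing out a shared
borrow that aliases the write guard's `&mut`:

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(0);
        let _w = lock.write().unwrap();
        // On a platform with lock elision, the pthread tryrdlock could
        // previously succeed here; with the write_locked flag in place, the
        // recursive read is detected and reported as a failed acquisition.
        assert!(lock.try_read().is_err());
    }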
From f3c68a0fdf119a3b285c33c38f9f7eebd053c853 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Thu, 26 May 2016 06:10:43 +0100
Subject: [PATCH 4/5] Add a test to ensure mutexes and rwlocks can't be
 re-locked

---
 src/test/run-pass/issue-33770.rs | 100 +++++++++++++++++++++++++++++++
 1 file changed, 100 insertions(+)
 create mode 100644 src/test/run-pass/issue-33770.rs

diff --git a/src/test/run-pass/issue-33770.rs b/src/test/run-pass/issue-33770.rs
new file mode 100644
index 0000000000000..f5635fddaf951
--- /dev/null
+++ b/src/test/run-pass/issue-33770.rs
@@ -0,0 +1,100 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::process::{Command, Stdio};
+use std::env;
+use std::sync::{Mutex, RwLock};
+use std::time::Duration;
+use std::thread;
+
+fn test_mutex() {
+    let m = Mutex::new(0);
+    let _g = m.lock().unwrap();
+    let _g2 = m.lock().unwrap();
+}
+
+fn test_try_mutex() {
+    let m = Mutex::new(0);
+    let _g = m.lock().unwrap();
+    let _g2 = m.try_lock().unwrap();
+}
+
+fn test_rwlock_ww() {
+    let m = RwLock::new(0);
+    let _g = m.write().unwrap();
+    let _g2 = m.write().unwrap();
+}
+
+fn test_try_rwlock_ww() {
+    let m = RwLock::new(0);
+    let _g = m.write().unwrap();
+    let _g2 = m.try_write().unwrap();
+}
+
+fn test_rwlock_rw() {
+    let m = RwLock::new(0);
+    let _g = m.read().unwrap();
+    let _g2 = m.write().unwrap();
+}
+
+fn test_try_rwlock_rw() {
+    let m = RwLock::new(0);
+    let _g = m.read().unwrap();
+    let _g2 = m.try_write().unwrap();
+}
+
+fn test_rwlock_wr() {
+    let m = RwLock::new(0);
+    let _g = m.write().unwrap();
+    let _g2 = m.read().unwrap();
+}
+
+fn test_try_rwlock_wr() {
+    let m = RwLock::new(0);
+    let _g = m.write().unwrap();
+    let _g2 = m.try_read().unwrap();
+}
+
+fn main() {
+    let args: Vec<String> = env::args().collect();
+    if args.len() > 1 {
+        match &*args[1] {
+            "mutex" => test_mutex(),
+            "try_mutex" => test_try_mutex(),
+            "rwlock_ww" => test_rwlock_ww(),
+            "try_rwlock_ww" => test_try_rwlock_ww(),
+            "rwlock_rw" => test_rwlock_rw(),
+            "try_rwlock_rw" => test_try_rwlock_rw(),
+            "rwlock_wr" => test_rwlock_wr(),
+            "try_rwlock_wr" => test_try_rwlock_wr(),
+            _ => unreachable!(),
+        }
+        // If we reach this point then the test failed
+        println!("TEST FAILED: {}", args[1]);
+    } else {
+        let mut v = vec![];
+        v.push(Command::new(&args[0]).arg("mutex").stderr(Stdio::null()).spawn().unwrap());
+        v.push(Command::new(&args[0]).arg("try_mutex").stderr(Stdio::null()).spawn().unwrap());
+        v.push(Command::new(&args[0]).arg("rwlock_ww").stderr(Stdio::null()).spawn().unwrap());
+        v.push(Command::new(&args[0]).arg("try_rwlock_ww").stderr(Stdio::null()).spawn().unwrap());
+        v.push(Command::new(&args[0]).arg("rwlock_rw").stderr(Stdio::null()).spawn().unwrap());
+        v.push(Command::new(&args[0]).arg("try_rwlock_rw").stderr(Stdio::null()).spawn().unwrap());
+        v.push(Command::new(&args[0]).arg("rwlock_wr").stderr(Stdio::null()).spawn().unwrap());
+        v.push(Command::new(&args[0]).arg("try_rwlock_wr").stderr(Stdio::null()).spawn().unwrap());
+
+        thread::sleep(Duration::new(1, 0));
+
+        // Make sure all subprocesses either panicked or were killed because
+        // they deadlocked
+        for mut c in v {
+            c.kill().ok();
+            assert!(!c.wait().unwrap().success());
+        }
+    }
+}
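The rwlock_rw and try_rwlock_rw cases in the test above are exactly the hole
closed by the final patch below: acquiring a write lock while the same thread
still holds a read lock. A minimal sketch of that scenario against plain `std`,
assuming the full series is applied:

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(0);
        let _r = lock.read().unwrap();
        // Without the num_readers counter added below, an elision-capable
        // pthread implementation could grant this write lock to the thread
        // that already holds a read lock, yielding a &mut that aliases the
        // outstanding shared borrow. With the counter, write() panics and
        // try_write() reports failure instead.
        assert!(lock.try_write().is_err());
    }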
From fc4b35612550d833cefcd586cb13ebc0dc5a51e1 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Thu, 26 May 2016 06:32:15 +0100
Subject: [PATCH 5/5] Fix rwlock successfully acquiring a write lock after a
 read lock

---
 src/libstd/sys/unix/rwlock.rs | 39 ++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs
index 72ab70aeac482..fbd4e1d120817 100644
--- a/src/libstd/sys/unix/rwlock.rs
+++ b/src/libstd/sys/unix/rwlock.rs
@@ -10,10 +10,12 @@
 use libc;
 use cell::UnsafeCell;
+use sync::atomic::{AtomicUsize, Ordering};

 pub struct RWLock {
     inner: UnsafeCell<libc::pthread_rwlock_t>,
     write_locked: UnsafeCell<bool>,
+    num_readers: AtomicUsize,
 }

 unsafe impl Send for RWLock {}
@@ -24,6 +26,7 @@ impl RWLock {
         RWLock {
             inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
             write_locked: UnsafeCell::new(false),
+            num_readers: AtomicUsize::new(0),
         }
     }
     #[inline]
@@ -54,23 +57,31 @@ impl RWLock {
             panic!("rwlock read lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
+            self.num_readers.fetch_add(1, Ordering::Relaxed);
         }
     }
     #[inline]
     pub unsafe fn try_read(&self) -> bool {
         let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
-        if r == 0 && *self.write_locked.get() {
-            self.raw_unlock();
-            false
+        if r == 0 {
+            if *self.write_locked.get() {
+                self.raw_unlock();
+                false
+            } else {
+                self.num_readers.fetch_add(1, Ordering::Relaxed);
+                true
+            }
         } else {
-            r == 0
+            false
         }
     }
     #[inline]
     pub unsafe fn write(&self) {
         let r = libc::pthread_rwlock_wrlock(self.inner.get());
-        // see comments above for why we check for EDEADLK and write_locked
-        if r == libc::EDEADLK || *self.write_locked.get() {
+        // See comments above for why we check for EDEADLK and write_locked. We
+        // also need to check that num_readers is 0.
+        if r == libc::EDEADLK || *self.write_locked.get() ||
+           self.num_readers.load(Ordering::Relaxed) != 0 {
             if r == 0 {
                 self.raw_unlock();
             }
@@ -83,12 +94,14 @@
     #[inline]
     pub unsafe fn try_write(&self) -> bool {
         let r = libc::pthread_rwlock_trywrlock(self.inner.get());
-        if r == 0 && *self.write_locked.get() {
-            self.raw_unlock();
-            false
-        } else if r == 0 {
-            *self.write_locked.get() = true;
-            true
+        if r == 0 {
+            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+                self.raw_unlock();
+                false
+            } else {
+                *self.write_locked.get() = true;
+                true
+            }
         } else {
             false
         }
@@ -101,10 +114,12 @@
     #[inline]
     pub unsafe fn read_unlock(&self) {
         debug_assert!(!*self.write_locked.get());
+        self.num_readers.fetch_sub(1, Ordering::Relaxed);
         self.raw_unlock();
     }
     #[inline]
     pub unsafe fn write_unlock(&self) {
+        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
        debug_assert!(*self.write_locked.get());
        *self.write_locked.get() = false;
        self.raw_unlock();
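A note on the memory ordering: num_readers uses Relaxed operations, which
appears sufficient because the hazard being guarded against is a same-thread
re-lock (ordered by program order alone), and cross-thread visibility of the
counter is piggybacked on the acquire/release behavior of the pthread lock
itself. A sketch of the end-to-end behavior after the whole series, using
plain `std`:

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(0);
        {
            let _r1 = lock.read().unwrap();
            let _r2 = lock.read().unwrap(); // shared read locks still nest fine
            // A write lock is refused while any reader is outstanding.
            assert!(lock.try_write().is_err());
        } // both read guards drop here, so num_readers returns to 0
        // With all readers gone, writing works again; the re-lock paths from
        // the earlier patches now panic or fail instead of invoking UB.
        *lock.write().unwrap() += 1;
        assert_eq!(*lock.read().unwrap(), 1);
    }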