diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs
index 2281609a04..f2bec972b1 100644
--- a/src/concurrency/data_race.rs
+++ b/src/concurrency/data_race.rs
@@ -847,6 +847,7 @@ impl VClockAlloc {
         kind: MemoryKind,
         current_span: Span,
     ) -> VClockAlloc {
+        // Determine the thread that did the allocation, and when it did it.
         let (alloc_timestamp, alloc_index) = match kind {
             // User allocated and stack memory should track allocation.
             MemoryKind::Machine(
@@ -858,13 +859,13 @@ impl VClockAlloc {
                 | MiriMemoryKind::Mmap,
             )
             | MemoryKind::Stack => {
-                let (alloc_index, clocks) = global.current_thread_state(thread_mgr);
+                let (alloc_index, clocks) = global.active_thread_state(thread_mgr);
                 let mut alloc_timestamp = clocks.clock[alloc_index];
                 alloc_timestamp.span = current_span;
                 (alloc_timestamp, alloc_index)
             }
             // Other global memory should trace races but be allocated at the 0 timestamp
-            // (conceptually they are allocated before everything).
+            // (conceptually they are allocated on the main thread before everything).
             MemoryKind::Machine(
                 MiriMemoryKind::Global
                 | MiriMemoryKind::Machine
@@ -872,7 +873,8 @@ impl VClockAlloc {
                 | MiriMemoryKind::ExternStatic
                 | MiriMemoryKind::Tls,
             )
-            | MemoryKind::CallerLocation => (VTimestamp::ZERO, VectorIdx::MAX_INDEX),
+            | MemoryKind::CallerLocation =>
+                (VTimestamp::ZERO, global.thread_index(ThreadId::MAIN_THREAD)),
         };
         VClockAlloc {
             alloc_ranges: RefCell::new(RangeMap::new(
@@ -930,7 +932,7 @@ impl VClockAlloc {
         ptr_dbg: Pointer<AllocId>,
         ty: Option<Ty<'tcx>>,
     ) -> InterpResult<'tcx> {
-        let (current_index, current_clocks) = global.current_thread_state(thread_mgr);
+        let (active_index, active_clocks) = global.active_thread_state(thread_mgr);
         let mut other_size = None; // if `Some`, this was a size-mismatch race
         let write_clock;
         let (other_access, other_thread, other_clock) =
@@ -939,30 +941,30 @@ impl VClockAlloc {
             // we are reporting races between two non-atomic reads.
             if !access.is_atomic() &&
                 let Some(atomic) = mem_clocks.atomic() &&
-                let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
+                let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
             {
                 (AccessType::AtomicStore, idx, &atomic.write_vector)
             } else if !access.is_atomic() &&
                 let Some(atomic) = mem_clocks.atomic() &&
-                let Some(idx) = Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
+                let Some(idx) = Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
             {
                 (AccessType::AtomicLoad, idx, &atomic.read_vector)
             // Then check races with non-atomic writes/reads.
-            } else if mem_clocks.write.1 > current_clocks.clock[mem_clocks.write.0] {
+            } else if mem_clocks.write.1 > active_clocks.clock[mem_clocks.write.0] {
                 write_clock = mem_clocks.write();
                 (AccessType::NaWrite(mem_clocks.write_type), mem_clocks.write.0, &write_clock)
-            } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &current_clocks.clock) {
+            } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) {
                 (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read)
             // Finally, mixed-size races.
             } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && atomic.size != access_size {
                 // This is only a race if we are not synchronized with all atomic accesses, so find
                 // the one we are not synchronized with.
                 other_size = Some(atomic.size);
-                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
+                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
                 {
                     (AccessType::AtomicStore, idx, &atomic.write_vector)
                 } else if let Some(idx) =
-                    Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
+                    Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
                 {
                     (AccessType::AtomicLoad, idx, &atomic.read_vector)
                 } else {
@@ -975,7 +977,7 @@ impl VClockAlloc {
         };
 
         // Load elaborated thread information about the racing thread actions.
-        let current_thread_info = global.print_thread_metadata(thread_mgr, current_index);
+        let active_thread_info = global.print_thread_metadata(thread_mgr, active_index);
         let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
         let involves_non_atomic = !access.is_atomic() || !other_access.is_atomic();
@@ -1003,8 +1005,8 @@ impl VClockAlloc {
                 },
                 op2: RacingOp {
                     action: access.description(ty, other_size.map(|_| access_size)),
-                    thread_info: current_thread_info,
-                    span: current_clocks.clock.as_slice()[current_index.index()].span_data(),
+                    thread_info: active_thread_info,
+                    span: active_clocks.clock.as_slice()[active_index.index()].span_data(),
                 },
             }))?
     }
@@ -1026,7 +1028,7 @@ impl VClockAlloc {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
         if global.race_detecting() {
-            let (index, mut thread_clocks) = global.current_thread_state_mut(&machine.threads);
+            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
             let mut alloc_ranges = self.alloc_ranges.borrow_mut();
             for (mem_clocks_range, mem_clocks) in
                 alloc_ranges.iter_mut(access_range.start, access_range.size)
@@ -1069,7 +1071,7 @@ impl VClockAlloc {
         let current_span = machine.current_span();
         let global = machine.data_race.as_mut().unwrap();
         if global.race_detecting() {
-            let (index, mut thread_clocks) = global.current_thread_state_mut(&machine.threads);
+            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
             for (mem_clocks_range, mem_clocks) in
                 self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
             {
@@ -1454,7 +1456,7 @@ impl GlobalState {
         // Setup the main-thread since it is not explicitly created:
         // uses vector index and thread-id 0.
         let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
-        global_state.vector_info.get_mut().push(ThreadId::new(0));
+        global_state.vector_info.get_mut().push(ThreadId::MAIN_THREAD);
         global_state
             .thread_info
             .get_mut()
@@ -1518,7 +1520,7 @@ impl GlobalState {
         thread: ThreadId,
         current_span: Span,
     ) {
-        let current_index = self.current_index(thread_mgr);
+        let current_index = self.active_thread_index(thread_mgr);
 
         // Enable multi-threaded execution, there are now at least two threads
         // so data-races are now possible.
@@ -1642,7 +1644,7 @@ impl GlobalState {
     /// `thread_joined`.
     #[inline]
     pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_, '_>, current_span: Span) {
-        let current_index = self.current_index(thread_mgr);
+        let current_index = self.active_thread_index(thread_mgr);
 
         // Increment the clock to a unique termination timestamp.
         let vector_clocks = self.vector_clocks.get_mut();
@@ -1680,9 +1682,9 @@ impl GlobalState {
         op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
     ) -> InterpResult<'tcx> {
         if self.multi_threaded.get() {
-            let (index, clocks) = self.current_thread_state_mut(thread_mgr);
+            let (index, clocks) = self.active_thread_state_mut(thread_mgr);
             if op(index, clocks)? {
-                let (_, mut clocks) = self.current_thread_state_mut(thread_mgr);
+                let (_, mut clocks) = self.active_thread_state_mut(thread_mgr);
                 clocks.increment_clock(index, current_span);
             }
         }
@@ -1725,13 +1727,15 @@ impl GlobalState {
         Ref::map(clocks, |c| &c.clock)
     }
 
+    fn thread_index(&self, thread: ThreadId) -> VectorIdx {
+        self.thread_info.borrow()[thread].vector_index.expect("thread has no assigned vector")
+    }
+
     /// Load the vector index used by the given thread as well as the set of vector clocks
     /// used by the thread.
     #[inline]
     fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
-        let index = self.thread_info.borrow()[thread]
-            .vector_index
-            .expect("Loading thread state for thread with no assigned vector");
+        let index = self.thread_index(thread);
         let ref_vector = self.vector_clocks.borrow_mut();
         let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
         (index, clocks)
     }
@@ -1741,9 +1745,7 @@ impl GlobalState {
     /// used by the thread.
     #[inline]
     fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
-        let index = self.thread_info.borrow()[thread]
-            .vector_index
-            .expect("Loading thread state for thread with no assigned vector");
+        let index = self.thread_index(thread);
         let ref_vector = self.vector_clocks.borrow();
         let clocks = Ref::map(ref_vector, |vec| &vec[index]);
         (index, clocks)
     }
@@ -1752,7 +1754,7 @@ impl GlobalState {
     /// Load the current vector clock in use and the current set of thread clocks
     /// in use for the vector.
     #[inline]
-    pub(super) fn current_thread_state(
+    pub(super) fn active_thread_state(
         &self,
         thread_mgr: &ThreadManager<'_, '_>,
     ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
@@ -1762,7 +1764,7 @@ impl GlobalState {
     /// Load the current vector clock in use and the current set of thread clocks
     /// in use for the vector mutably for modification.
     #[inline]
-    pub(super) fn current_thread_state_mut(
+    pub(super) fn active_thread_state_mut(
         &self,
         thread_mgr: &ThreadManager<'_, '_>,
     ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
@@ -1772,22 +1774,20 @@ impl GlobalState {
     /// Return the current thread, should be the same
     /// as the data-race active thread.
     #[inline]
-    fn current_index(&self, thread_mgr: &ThreadManager<'_, '_>) -> VectorIdx {
+    fn active_thread_index(&self, thread_mgr: &ThreadManager<'_, '_>) -> VectorIdx {
         let active_thread_id = thread_mgr.get_active_thread_id();
-        self.thread_info.borrow()[active_thread_id]
-            .vector_index
-            .expect("active thread has no assigned vector")
+        self.thread_index(active_thread_id)
     }
 
     // SC ATOMIC STORE rule in the paper.
     pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_, '_>) {
-        let (index, clocks) = self.current_thread_state(thread_mgr);
+        let (index, clocks) = self.active_thread_state(thread_mgr);
         self.last_sc_write.borrow_mut().set_at_index(&clocks.clock, index);
     }
 
     // SC ATOMIC READ rule in the paper.
     pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_, '_>) {
-        let (.., mut clocks) = self.current_thread_state_mut(thread_mgr);
+        let (.., mut clocks) = self.active_thread_state_mut(thread_mgr);
         clocks.read_seqcst.join(&self.last_sc_fence.borrow());
     }
 }
diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs
index 2fabd39a74..0116bd0281 100644
--- a/src/concurrency/thread.rs
+++ b/src/concurrency/thread.rs
@@ -57,6 +57,8 @@ impl ThreadId {
     pub fn to_u32(self) -> u32 {
         self.0
     }
+
+    pub const MAIN_THREAD: ThreadId = ThreadId(0);
 }
 
 impl Idx for ThreadId {
@@ -401,7 +403,7 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
         // Create the main thread and add it to the list of threads.
         threads.push(Thread::new(Some("main"), None));
         Self {
-            active_thread: ThreadId::new(0),
+            active_thread: ThreadId::MAIN_THREAD,
             threads,
             sync: SynchronizationState::default(),
             thread_local_alloc_ids: Default::default(),
@@ -416,10 +418,12 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
         ecx: &mut MiriInterpCx<'mir, 'tcx>,
         on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>,
     ) {
-        ecx.machine.threads.threads[ThreadId::new(0)].on_stack_empty = Some(on_main_stack_empty);
+        ecx.machine.threads.threads[ThreadId::MAIN_THREAD].on_stack_empty =
+            Some(on_main_stack_empty);
         if ecx.tcx.sess.target.os.as_ref() != "windows" {
             // The main thread can *not* be joined on except on windows.
-            ecx.machine.threads.threads[ThreadId::new(0)].join_status = ThreadJoinStatus::Detached;
+            ecx.machine.threads.threads[ThreadId::MAIN_THREAD].join_status =
+                ThreadJoinStatus::Detached;
         }
     }
diff --git a/src/concurrency/vector_clock.rs b/src/concurrency/vector_clock.rs
index 2cd3d031b1..c3496bc1a0 100644
--- a/src/concurrency/vector_clock.rs
+++ b/src/concurrency/vector_clock.rs
@@ -13,15 +13,13 @@ use super::data_race::NaReadType;
 /// but in some cases one vector index may be shared with
 /// multiple thread ids if it's safe to do so.
 #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
-pub struct VectorIdx(u32);
+pub(super) struct VectorIdx(u32);
 
 impl VectorIdx {
     #[inline(always)]
-    pub fn to_u32(self) -> u32 {
+    fn to_u32(self) -> u32 {
         self.0
     }
-
-    pub const MAX_INDEX: VectorIdx = VectorIdx(u32::MAX);
 }
 
 impl Idx for VectorIdx {
@@ -51,7 +49,7 @@ const SMALL_VECTOR: usize = 4;
 /// a 32-bit unsigned integer which is the actual timestamp, and a `Span`
 /// so that diagnostics can report what code was responsible for an operation.
 #[derive(Clone, Copy, Debug)]
-pub struct VTimestamp {
+pub(super) struct VTimestamp {
     /// The lowest bit indicates read type, the rest is the time.
     /// `1` indicates a retag read, `0` a regular read.
     time_and_read_type: u32,
@@ -87,7 +85,7 @@ impl VTimestamp {
     }
 
     #[inline]
-    pub fn read_type(&self) -> NaReadType {
+    pub(super) fn read_type(&self) -> NaReadType {
         if self.time_and_read_type & 1 == 0 { NaReadType::Read } else { NaReadType::Retag }
     }
 
@@ -97,7 +95,7 @@ impl VTimestamp {
     }
 
     #[inline]
-    pub fn span_data(&self) -> SpanData {
+    pub(super) fn span_data(&self) -> SpanData {
         self.span.data()
     }
 }
diff --git a/src/concurrency/weak_memory.rs b/src/concurrency/weak_memory.rs
index 9ebb64afd3..f544393cfe 100644
--- a/src/concurrency/weak_memory.rs
+++ b/src/concurrency/weak_memory.rs
@@ -270,7 +270,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
     ) {
         let store_elem = self.buffer.back();
         if let Some(store_elem) = store_elem {
-            let (index, clocks) = global.current_thread_state(thread_mgr);
+            let (index, clocks) = global.active_thread_state(thread_mgr);
             store_elem.load_impl(index, &clocks, is_seqcst);
         }
     }
@@ -289,7 +289,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         let (store_elem, recency) = {
             // The `clocks` we got here must be dropped before calling validate_atomic_load
             // as the race detector will update it
-            let (.., clocks) = global.current_thread_state(thread_mgr);
+            let (.., clocks) = global.active_thread_state(thread_mgr);
             // Load from a valid entry in the store buffer
             self.fetch_store(is_seqcst, &clocks, &mut *rng)
         };
@@ -300,7 +300,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         // requires access to ThreadClockSet.clock, which is updated by the race detector
        validate()?;

-        let (index, clocks) = global.current_thread_state(thread_mgr);
+        let (index, clocks) = global.active_thread_state(thread_mgr);
         let loaded = store_elem.load_impl(index, &clocks, is_seqcst);
         Ok((loaded, recency))
     }
@@ -312,7 +312,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         thread_mgr: &ThreadManager<'_, '_>,
         is_seqcst: bool,
     ) -> InterpResult<'tcx> {
-        let (index, clocks) = global.current_thread_state(thread_mgr);
+        let (index, clocks) = global.active_thread_state(thread_mgr);
         self.store_impl(val, index, &clocks.clock, is_seqcst);
         Ok(())
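
Reviewer note, not part of the patch: the main behavioral change above is that global allocations are no longer stamped with the `VectorIdx::MAX_INDEX` sentinel but are attributed to the main thread (`ThreadId::MAIN_THREAD`) at timestamp zero, so every allocation carries a real vector index that ordinary vector-clock comparisons can handle. The standalone Rust sketch below uses simplified, hypothetical types (a plain `Vec<u32>` clock and a `usize` index, none of Miri's actual `VClock`/`ThreadClockSet` machinery) to illustrate why attributing globals to index 0 at time 0 makes them ordered before every access.

// Standalone sketch with assumed simplified types (not Miri's actual API):
// a vector clock is a plain Vec<u32>, thread indices are usize, and the
// main thread always owns index 0.

const MAIN_THREAD: usize = 0;

#[derive(Clone, Debug)]
struct VClock(Vec<u32>);

impl VClock {
    fn new(num_threads: usize) -> Self {
        VClock(vec![0; num_threads])
    }

    // An event (index, timestamp) happened-before this clock if the clock
    // has already reached that timestamp at that index.
    fn covers(&self, index: usize, timestamp: u32) -> bool {
        self.0.get(index).copied().unwrap_or(0) >= timestamp
    }
}

// Birth event of an allocation: which thread created it, and when.
struct AllocBirth {
    index: usize,
    timestamp: u32,
}

fn global_allocation() -> AllocBirth {
    // Globals are conceptually created on the main thread before everything
    // else, so they get the main thread's index and timestamp 0. Every
    // thread's clock trivially covers that event, and no out-of-range
    // sentinel index is needed when comparing clocks later.
    AllocBirth { index: MAIN_THREAD, timestamp: 0 }
}

fn main() {
    let birth = global_allocation();
    // Even a freshly spawned thread with an all-zero clock is ordered after
    // the global allocation, because 0 >= 0 at the main thread's index.
    let fresh_thread_clock = VClock::new(4);
    assert!(fresh_thread_clock.covers(birth.index, birth.timestamp));
    println!("global allocation is ordered before every access");
}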