
loom/rt/mod.rs

#[macro_use]
mod location;
pub(crate) use self::location::Location;

mod access;
use self::access::Access;

mod alloc;
pub(crate) use self::alloc::{alloc, dealloc, Allocation};

mod arc;
pub(crate) use self::arc::Arc;

mod atomic;
pub(crate) use self::atomic::{fence, Atomic};

pub(crate) mod cell;
pub(crate) use self::cell::Cell;

mod condvar;
pub(crate) use self::condvar::Condvar;

mod execution;
pub(crate) use self::execution::Execution;

mod notify;
pub(crate) use self::notify::Notify;

mod num;
pub(crate) use self::num::Numeric;

#[macro_use]
pub(crate) mod object;

mod mpsc;
pub(crate) use self::mpsc::Channel;

mod mutex;
pub(crate) use self::mutex::Mutex;

mod path;
pub(crate) use self::path::Path;

mod rwlock;
pub(crate) use self::rwlock::RwLock;

mod scheduler;
pub(crate) use self::scheduler::Scheduler;

mod synchronize;
pub(crate) use self::synchronize::Synchronize;

pub(crate) mod lazy_static;
pub(crate) mod thread;

mod vv;
pub(crate) use self::vv::VersionVec;

use tracing::trace;

/// Maximum number of threads that can be included in a model.
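///
/// A model may not use more threads than this. A minimal sketch of staying
/// within the limit, written against the public `loom::model` and
/// `loom::thread` wrappers (assumed here to drive this runtime):
///
/// ```
/// loom::model(|| {
///     // Two spawned threads plus the main thread stays under the limit.
///     let handles: Vec<_> = (0..2).map(|_| loom::thread::spawn(|| {})).collect();
///
///     for handle in handles {
///         handle.join().unwrap();
///     }
/// });
/// ```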
pub const MAX_THREADS: usize = 5;

/// Maximum number of atomic store history entries to track per cell.
pub(crate) const MAX_ATOMIC_HISTORY: usize = 7;

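/// Registers a new thread with the current execution and asks the scheduler to
/// run `f` on it (with the optional `stack_size`), marking the thread as done
/// once `f` returns.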
pub(crate) fn spawn<F>(stack_size: Option<usize>, f: F) -> crate::rt::thread::Id
where
    F: FnOnce() + 'static,
{
    let id = execution(|execution| execution.new_thread());

    trace!(thread = ?id, "spawn");

    Scheduler::spawn(
        stack_size,
        Box::new(move || {
            f();
            thread_done();
        }),
    );

    id
}

/// Marks the current thread as blocked
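///
/// If the thread was unparked while it was still runnable, the pending unpark
/// is consumed instead of blocking. Otherwise the thread is parked and the
/// scheduler may switch to another runnable thread.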
pub(crate) fn park(location: Location) {
    let switch = execution(|execution| {
        use thread::State;
        let thread = execution.threads.active_id();
        let active = execution.threads.active_mut();

        trace!(?thread, ?active.state, "park");

        match active.state {
            // The thread was previously unparked while it was active. Instead
            // of parking, consume the unpark.
            State::Runnable { unparked: true } => {
                active.set_runnable();
                return false;
            }
            // The thread doesn't have a saved unpark; set its state to blocked.
            _ => active.set_blocked(location),
        };

        execution.threads.active_mut().operation = None;
        execution.schedule()
    });

    if switch {
        Scheduler::switch();
    }
}

/// Add an execution branch point.
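///
/// The model's concurrency primitives call this whenever they perform an
/// operation whose interleaving matters, giving the scheduler a chance to
/// switch to a different thread before `f`'s result is returned to the caller.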
fn branch<F, R>(f: F) -> R
where
    F: FnOnce(&mut Execution) -> R,
{
    let (ret, switch) = execution(|execution| {
        let ret = f(execution);
        let switch = execution.schedule();

        trace!(?switch, "branch");

        (ret, switch)
    });

    if switch {
        Scheduler::switch();
    }

    ret
}

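/// Bumps the active thread's entry in its version vector before running `f`,
/// marking a synchronization point in the causal history of the execution.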
fn synchronize<F, R>(f: F) -> R
where
    F: FnOnce(&mut Execution) -> R,
{
    execution(|execution| {
        execution.threads.active_causality_inc();
        trace!("synchronize");
        f(execution)
    })
}

/// Yield the thread.
///
/// This enables concurrent algorithms that require other threads to make
/// progress.
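///
/// A sketch of the usual pattern, written against the public
/// `loom::thread::yield_now` wrapper (assumed here to route to this function):
/// a spin loop that yields lets the model schedule the writer thread instead
/// of spinning indefinitely.
///
/// ```
/// use loom::sync::atomic::{AtomicBool, Ordering};
/// use loom::sync::Arc;
///
/// loom::model(|| {
///     let flag = Arc::new(AtomicBool::new(false));
///     let writer = flag.clone();
///
///     let t = loom::thread::spawn(move || {
///         writer.store(true, Ordering::Release);
///     });
///
///     // Yielding signals to loom that this thread cannot make progress
///     // until another thread runs.
///     while !flag.load(Ordering::Acquire) {
///         loom::thread::yield_now();
///     }
///
///     t.join().unwrap();
/// });
/// ```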
pub fn yield_now() {
    let switch = execution(|execution| {
        let thread = execution.threads.active_id();

        execution.threads.active_mut().set_yield();
        execution.threads.active_mut().operation = None;
        let switch = execution.schedule();

        trace!(?thread, ?switch, "yield_now");

        switch
    });

    if switch {
        Scheduler::switch();
    }
}

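/// Runs `f` with mutable access to the execution state of the currently
/// running model, via the scheduler.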
pub(crate) fn execution<F, R>(f: F) -> R
where
    F: FnOnce(&mut Execution) -> R,
{
    Scheduler::with_execution(f)
}

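/// Marks the current thread as terminated.
///
/// Thread locals are dropped first, outside of the execution context, before
/// the thread is flagged as terminated and a final schedule point is recorded.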
pub fn thread_done() {
    let locals = execution(|execution| {
        let thread = execution.threads.active_id();

        trace!(?thread, "thread_done: drop locals");

        execution.threads.active_mut().drop_locals()
    });

    // Drop outside of the execution context
    drop(locals);

    execution(|execution| {
        let thread = execution.threads.active_id();

        execution.threads.active_mut().operation = None;
        execution.threads.active_mut().set_terminated();
        let switch = execution.schedule();
        trace!(?thread, ?switch, "thread_done: terminate");
    });
}

/// Tells loom to explore possible concurrent executions starting at this point.
pub fn explore() {
    execution(|execution| {
        execution.path.explore_state();
    })
}

/// Tells loom to stop exploring possible concurrent executions starting at this
/// point.
///
/// Exploration can be enabled again with `explore`.
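///
/// A sketch of pairing the two, assuming both functions are re-exported at the
/// crate root (`loom::stop_exploring` / `loom::explore`); adjust the paths if
/// your version exposes them elsewhere:
///
/// ```no_run
/// loom::model(|| {
///     // Skip exhaustive exploration of the setup phase...
///     loom::stop_exploring();
///     let value = 1 + 1;
///
///     // ...and turn it back on for the interesting part.
///     loom::explore();
///     assert_eq!(value, 2);
/// });
/// ```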
pub fn stop_exploring() {
    execution(|execution| {
        execution.path.critical();
    })
}

/// Tells loom to stop exploring possible concurrent executions starting at this
/// point.
///
/// Unlike `stop_exploring`, exploration cannot be restarted by `explore`.
pub fn skip_branch() {
    execution(|execution| execution.path.skip_branch())
}