use crate::impl_::not_send::{NotSend, NOT_SEND};
use crate::{ffi, Python};
use parking_lot::{const_mutex, Mutex, Once};
use std::cell::Cell;
#[cfg(debug_assertions)]
use std::cell::RefCell;
#[cfg(not(debug_assertions))]
use std::cell::UnsafeCell;
use std::{mem, ptr::NonNull};
// One-time initialization gate shared by `prepare_freethreaded_python` and
// `GILGuard::acquire`.
static START: Once = Once::new();

cfg_if::cfg_if! {
    if #[cfg(thread_local_const_init)] {
        // Toolchain supports `thread_local! { static X: T = const { .. } }`
        // directly; alias the std macro under the name used below.
        use std::thread_local as thread_local_const_init;
    } else {
        // Fallback for older toolchains: strip the `const { .. }` wrapper and
        // expand to a plain (lazily initialized) `thread_local!`.
        macro_rules! thread_local_const_init {
            ($($(#[$attr:meta])* static $name:ident: $ty:ty = const { $init:expr };)*) => (
                thread_local! { $($(#[$attr])* static $name: $ty = $init;)* }
            )
        }
    }
}

thread_local_const_init! {
    // Number of GILPool/GILGuard levels this thread currently holds. A
    // negative value is a sentinel meaning GIL access is prohibited (see
    // `GIL_LOCKED_DURING_TRAVERSE` and `LockGIL`).
    static GIL_COUNT: Cell<isize> = const { Cell::new(0) };

    // Objects owned by the currently-active `GILPool`s on this thread.
    // Debug builds use `RefCell` so misuse is caught by borrow checking;
    // release builds use `UnsafeCell` to skip that bookkeeping.
    #[cfg(debug_assertions)]
    static OWNED_OBJECTS: RefCell<PyObjVec> = const { RefCell::new(Vec::new()) };
    #[cfg(not(debug_assertions))]
    static OWNED_OBJECTS: UnsafeCell<PyObjVec> = const { UnsafeCell::new(Vec::new()) };
}

// Sentinel stored in `GIL_COUNT` while a `__traverse__` implementation runs;
// `increment_gil_count` panics (via `LockGIL::bail`) when it sees it.
const GIL_LOCKED_DURING_TRAVERSE: isize = -1;
/// Reports whether this thread currently holds the GIL (i.e. its
/// `GIL_COUNT` is positive).
#[inline(always)]
fn gil_is_acquired() -> bool {
    // `try_with` fails during thread teardown once the thread-local has been
    // destroyed; treat that as "GIL not held".
    match GIL_COUNT.try_with(|count| count.get()) {
        Ok(n) => n > 0,
        Err(_) => false,
    }
}
/// Initializes the Python interpreter (if it is not already initialized) and
/// immediately releases the GIL, so any thread can later acquire it.
///
/// Safe to call multiple times; the work is guarded by `START`.
#[cfg(not(PyPy))]
pub fn prepare_freethreaded_python() {
    // `call_once_force` re-runs the closure if a previous attempt panicked.
    START.call_once_force(|_| unsafe {
        if ffi::Py_IsInitialized() == 0 {
            // 0 = do not install Python's signal handlers.
            ffi::Py_InitializeEx(0);
            // Py_InitializeEx leaves the GIL held; release it so callers can
            // acquire it via the normal PyGILState machinery.
            ffi::PyEval_SaveThread();
        }
    });
}
/// Initializes an embedded Python interpreter, runs `f` with the GIL held,
/// then finalizes the interpreter before returning `f`'s result.
///
/// # Panics
/// Panics if a Python interpreter is already running.
///
/// # Safety
/// The interpreter is finalized before this function returns, so the caller
/// must ensure no Python objects or interpreter state escape `f`.
#[cfg(not(PyPy))]
pub unsafe fn with_embedded_python_interpreter<F, R>(f: F) -> R
where
    F: for<'p> FnOnce(Python<'p>) -> R,
{
    assert_eq!(
        ffi::Py_IsInitialized(),
        0,
        "called `with_embedded_python_interpreter` but a Python interpreter is already running."
    );

    ffi::Py_InitializeEx(0);

    let pool = GILPool::new();

    // NOTE(review): importing `threading` here appears intended to make sure
    // the module is set up on this (main) thread before user code runs —
    // confirm the exact motivation against upstream history.
    pool.python().import("threading").unwrap();

    let result = f(pool.python());

    // Drop the pool (releasing its owned references) while the interpreter is
    // still alive, then shut the interpreter down.
    drop(pool);
    ffi::Py_Finalize();

    result
}
/// RAII guard for an acquired GIL: stores the `PyGILState_STATE` needed to
/// release the GIL again, plus an owned-object pool.
pub(crate) struct GILGuard {
    gstate: ffi::PyGILState_STATE,
    // ManuallyDrop so `Drop for GILGuard` can enforce ordering: the pool is
    // dropped explicitly while the GIL is still held, before release.
    pool: mem::ManuallyDrop<GILPool>,
}
impl GILGuard {
    /// Acquires the GIL, initializing the interpreter first where the build
    /// configuration allows. Returns `None` if this thread already holds the
    /// GIL (no new guard is needed in that case).
    pub(crate) fn acquire() -> Option<Self> {
        if gil_is_acquired() {
            return None;
        }

        cfg_if::cfg_if! {
            if #[cfg(all(feature = "auto-initialize", not(PyPy)))] {
                prepare_freethreaded_python();
            } else {
                // Without `auto-initialize`, only initialize automatically when
                // this crate is the primary cargo package (e.g. running pyo3's
                // own tests); otherwise require the embedder to have done it.
                #[cfg(not(PyPy))]
                if option_env!("CARGO_PRIMARY_PACKAGE").is_some() {
                    prepare_freethreaded_python();
                }

                // Fail loudly if the interpreter still is not initialized.
                START.call_once_force(|_| unsafe {
                    assert_ne!(
                        ffi::Py_IsInitialized(),
                        0,
                        "The Python interpreter is not initialized and the `auto-initialize` \
                        feature is not enabled.\n\n\
                        Consider calling `pyo3::prepare_freethreaded_python()` before attempting \
                        to use Python APIs."
                    );
                });
            }
        }

        Self::acquire_unchecked()
    }

    /// Acquires the GIL without checking whether the interpreter is
    /// initialized. Returns `None` if this thread already holds the GIL.
    pub(crate) fn acquire_unchecked() -> Option<Self> {
        if gil_is_acquired() {
            return None;
        }

        // Take the GIL first; the pool constructor requires it to be held.
        let gstate = unsafe { ffi::PyGILState_Ensure() };
        let pool = unsafe { mem::ManuallyDrop::new(GILPool::new()) };

        Some(GILGuard { gstate, pool })
    }
}
impl Drop for GILGuard {
    fn drop(&mut self) {
        unsafe {
            // Drop the owned-object pool (running Py_DECREFs) while the GIL
            // is still held, and only then hand the GIL back.
            mem::ManuallyDrop::drop(&mut self.pool);
            ffi::PyGILState_Release(self.gstate);
        }
    }
}
// A list of raw pointers into the Python heap.
type PyObjVec = Vec<NonNull<ffi::PyObject>>;

/// Process-wide buffer of reference-count updates requested by threads that
/// did not hold the GIL at the time; flushed by `update_counts` whenever some
/// thread next holds the GIL.
struct ReferencePool {
    // (pending increfs, pending decrefs)
    pointer_ops: Mutex<(PyObjVec, PyObjVec)>,
}
impl ReferencePool {
    const fn new() -> Self {
        Self {
            pointer_ops: const_mutex((Vec::new(), Vec::new())),
        }
    }

    /// Queues an incref for `obj`, to be applied next time the GIL is held.
    fn register_incref(&self, obj: NonNull<ffi::PyObject>) {
        self.pointer_ops.lock().0.push(obj);
    }

    /// Queues a decref for `obj`, to be applied next time the GIL is held.
    fn register_decref(&self, obj: NonNull<ffi::PyObject>) {
        self.pointer_ops.lock().1.push(obj);
    }

    /// Applies all queued refcount updates. The `Python` token witnesses that
    /// the caller holds the GIL.
    fn update_counts(&self, _py: Python<'_>) {
        let mut ops = self.pointer_ops.lock();
        if ops.0.is_empty() && ops.1.is_empty() {
            return;
        }

        let (increfs, decrefs) = mem::take(&mut *ops);
        // Release the mutex before touching refcounts: Py_DECREF can run
        // arbitrary destructors which may call back into this pool (see
        // `test_update_counts_does_not_deadlock`).
        drop(ops);

        for ptr in increfs {
            unsafe { ffi::Py_INCREF(ptr.as_ptr()) };
        }

        for ptr in decrefs {
            unsafe { ffi::Py_DECREF(ptr.as_ptr()) };
        }
    }
}
// SAFETY: the interior pointers are only ever pushed/taken while holding the
// `pointer_ops` mutex, so sharing the pool across threads is sound. The
// pointers themselves are not dereferenced without the GIL.
unsafe impl Sync for ReferencePool {}

// The single global buffer of deferred refcount operations.
static POOL: ReferencePool = ReferencePool::new();
/// RAII token for a region where this thread has temporarily released the
/// GIL (e.g. `Python::allow_threads`); restores both the thread state and
/// this thread's `GIL_COUNT` on drop.
pub(crate) struct SuspendGIL {
    // GIL_COUNT value saved (and zeroed) while the GIL is released.
    count: isize,
    tstate: *mut ffi::PyThreadState,
}
impl SuspendGIL {
    /// Releases the GIL and zeroes this thread's `GIL_COUNT`, remembering the
    /// old value for restoration on drop.
    ///
    /// # Safety
    /// The GIL must be held by this thread (required by `PyEval_SaveThread`).
    pub(crate) unsafe fn new() -> Self {
        let count = GIL_COUNT.with(|c| c.replace(0));
        let tstate = ffi::PyEval_SaveThread();
        Self { count, tstate }
    }
}
impl Drop for SuspendGIL {
    fn drop(&mut self) {
        // Restore the saved recursion count, then re-take the GIL.
        GIL_COUNT.with(|c| c.set(self.count));
        unsafe {
            ffi::PyEval_RestoreThread(self.tstate);

            // Flush refcount updates that other threads queued while this
            // thread had the GIL released.
            POOL.update_counts(Python::assume_gil_acquired());
        }
    }
}
/// RAII token that forbids GIL access on this thread for its lifetime by
/// storing a negative sentinel in `GIL_COUNT` (used e.g. during
/// `__traverse__`, where Python APIs must not be called).
pub(crate) struct LockGIL {
    // Previous GIL_COUNT value, restored on drop.
    count: isize,
}
impl LockGIL {
    /// Prohibits GIL access for the duration of a `__traverse__` call.
    pub fn during_traverse() -> Self {
        Self::new(GIL_LOCKED_DURING_TRAVERSE)
    }

    /// Replaces this thread's `GIL_COUNT` with the (negative) `reason`
    /// sentinel, remembering the old value for restoration on drop.
    fn new(reason: isize) -> Self {
        let count = GIL_COUNT.with(|c| c.replace(reason));

        Self { count }
    }

    /// Panics with a message matching the reason GIL access is prohibited.
    ///
    /// Fix: the `__traverse__` message previously misspelled
    /// "implementation" ("implmentation").
    #[cold]
    fn bail(current: isize) {
        match current {
            GIL_LOCKED_DURING_TRAVERSE => panic!(
                "Access to the GIL is prohibited while a __traverse__ implementation is running."
            ),
            _ => panic!("Access to the GIL is currently prohibited."),
        }
    }
}
impl Drop for LockGIL {
    fn drop(&mut self) {
        // Restore the pre-lock GIL count, re-enabling GIL access.
        GIL_COUNT.with(|c| c.set(self.count));
    }
}
/// A scope for owned Python references: objects registered (via
/// `register_owned`) while the pool is alive are decref'd when it drops.
pub struct GILPool {
    // Length of OWNED_OBJECTS at pool creation; everything pushed after this
    // index belongs to this pool. `None` if the thread-local was already
    // destroyed (thread teardown).
    start: Option<usize>,
    // Pools must stay on the thread that holds the GIL.
    _not_send: NotSend,
}
impl GILPool {
    /// Creates a new pool and flushes any refcount updates queued by threads
    /// that lacked the GIL.
    ///
    /// # Safety
    /// The GIL must be held (the pool hands out `Python` tokens).
    #[inline]
    pub unsafe fn new() -> GILPool {
        increment_gil_count();
        // Apply increfs/decrefs deferred since the GIL was last held.
        POOL.update_counts(Python::assume_gil_acquired());
        GILPool {
            // Record the current owned-objects length; `try_with` fails (and
            // `start` becomes None) only during thread shutdown.
            start: OWNED_OBJECTS
                .try_with(|owned_objects| {
                    #[cfg(debug_assertions)]
                    let len = owned_objects.borrow().len();
                    // SAFETY(release builds): OWNED_OBJECTS is thread-local and
                    // this read does not re-enter the cell.
                    #[cfg(not(debug_assertions))]
                    let len = unsafe { (*owned_objects.get()).len() };
                    len
                })
                .ok(),
            _not_send: NOT_SEND,
        }
    }

    /// Returns a GIL token whose lifetime is tied to this pool.
    #[inline]
    pub fn python(&self) -> Python<'_> {
        unsafe { Python::assume_gil_acquired() }
    }
}
impl Drop for GILPool {
    fn drop(&mut self) {
        if let Some(start) = self.start {
            // Split off every object registered after this pool was created.
            let owned_objects = OWNED_OBJECTS.with(|owned_objects| {
                #[cfg(debug_assertions)]
                let mut owned_objects = owned_objects.borrow_mut();
                #[cfg(not(debug_assertions))]
                let owned_objects = unsafe { &mut *owned_objects.get() };
                if start < owned_objects.len() {
                    owned_objects.split_off(start)
                } else {
                    Vec::new()
                }
            });
            // Decref outside the thread-local access above, so destructors
            // that register new owned objects don't re-enter the cell.
            for obj in owned_objects {
                unsafe {
                    ffi::Py_DECREF(obj.as_ptr());
                }
            }
        }
        decrement_gil_count();
    }
}
/// Increments `obj`'s reference count: immediately when this thread holds the
/// GIL, otherwise by queueing the incref in the global pool to be applied
/// the next time some thread holds the GIL.
///
/// # Safety
/// `obj` must point to a valid Python object.
pub unsafe fn register_incref(obj: NonNull<ffi::PyObject>) {
    if !gil_is_acquired() {
        // No GIL on this thread — defer the incref.
        POOL.register_incref(obj);
    } else {
        ffi::Py_INCREF(obj.as_ptr())
    }
}
/// Decrements `obj`'s reference count: immediately when this thread holds the
/// GIL, otherwise by queueing the decref in the global pool to be applied
/// the next time some thread holds the GIL.
///
/// # Safety
/// `obj` must point to a valid Python object whose count can be decremented.
pub unsafe fn register_decref(obj: NonNull<ffi::PyObject>) {
    if !gil_is_acquired() {
        // No GIL on this thread — defer the decref.
        POOL.register_decref(obj);
    } else {
        ffi::Py_DECREF(obj.as_ptr())
    }
}
/// Registers `obj` as owned by the innermost active `GILPool` on this thread;
/// it will be decref'd when that pool drops.
///
/// # Safety
/// `obj` must be a valid owned reference, and the GIL must be held (asserted
/// in debug builds; the `Python` token witnesses it).
pub unsafe fn register_owned(_py: Python<'_>, obj: NonNull<ffi::PyObject>) {
    debug_assert!(gil_is_acquired());
    // `try_with` fails only during thread teardown; silently dropping the
    // registration there is the best we can do.
    let _ = OWNED_OBJECTS.try_with(|owned_objects| {
        #[cfg(debug_assertions)]
        owned_objects.borrow_mut().push(obj);
        // SAFETY(release builds): OWNED_OBJECTS is thread-local and this push
        // does not re-enter the cell.
        #[cfg(not(debug_assertions))]
        unsafe {
            (*owned_objects.get()).push(obj);
        }
    });
}
/// Bumps this thread's GIL recursion count, panicking if GIL access is
/// currently prohibited (negative sentinel set by `LockGIL`).
#[inline(always)]
fn increment_gil_count() {
    // Ignore failure: the thread-local may already be gone during teardown.
    let _ = GIL_COUNT.try_with(|count| match count.get() {
        prohibited if prohibited < 0 => LockGIL::bail(prohibited),
        n => count.set(n + 1),
    });
}
/// Decrements this thread's GIL recursion count; in debug builds, asserts the
/// count was positive beforehand.
#[inline(always)]
fn decrement_gil_count() {
    // Ignore failure: the thread-local may already be gone during teardown.
    let _ = GIL_COUNT.try_with(|count| {
        let previous = count.get();
        debug_assert!(
            previous > 0,
            "Negative GIL count detected. Please report this error to the PyO3 repo as a bug."
        );
        count.set(previous - 1);
    });
}
#[cfg(test)]
mod tests {
    use super::{gil_is_acquired, GILPool, GIL_COUNT, OWNED_OBJECTS, POOL};
    use crate::{ffi, gil, PyObject, Python, ToPyObject};
    #[cfg(not(target_arch = "wasm32"))]
    use parking_lot::{const_mutex, Condvar, Mutex};
    use std::ptr::NonNull;

    // Creates a fresh Python object with refcount 1, owned by the caller
    // (the temporary pool ensures no extra owned reference survives).
    fn get_object(py: Python<'_>) -> PyObject {
        let pool = unsafe { py.new_pool() };
        let py = pool.python();

        let obj = py.eval("object()", None, None).unwrap();
        obj.to_object(py)
    }

    // Number of objects currently registered in this thread's OWNED_OBJECTS.
    fn owned_object_count() -> usize {
        #[cfg(debug_assertions)]
        let len = OWNED_OBJECTS.with(|owned_objects| owned_objects.borrow().len());
        #[cfg(not(debug_assertions))]
        let len = OWNED_OBJECTS.with(|owned_objects| unsafe { (*owned_objects.get()).len() });
        len
    }

    // True when `obj` is NOT in the global pool's pending-incref list.
    fn pool_inc_refs_does_not_contain(obj: &PyObject) -> bool {
        !POOL
            .pointer_ops
            .lock()
            .0
            .contains(&unsafe { NonNull::new_unchecked(obj.as_ptr()) })
    }

    // True when `obj` is NOT in the global pool's pending-decref list.
    fn pool_dec_refs_does_not_contain(obj: &PyObject) -> bool {
        !POOL
            .pointer_ops
            .lock()
            .1
            .contains(&unsafe { NonNull::new_unchecked(obj.as_ptr()) })
    }

    // True when the pool's pending lists match exactly.
    #[cfg(not(target_arch = "wasm32"))]
    fn pool_dirty_with(
        inc_refs: Vec<NonNull<ffi::PyObject>>,
        dec_refs: Vec<NonNull<ffi::PyObject>>,
    ) -> bool {
        *POOL.pointer_ops.lock() == (inc_refs, dec_refs)
    }

    #[test]
    fn test_owned() {
        Python::with_gil(|py| {
            let obj = get_object(py);
            let obj_ptr = obj.as_ptr();
            // Ensure that obj does not get freed
            let _ref = obj.clone_ref(py);

            unsafe {
                {
                    let pool = py.new_pool();
                    gil::register_owned(pool.python(), NonNull::new_unchecked(obj.into_ptr()));

                    assert_eq!(owned_object_count(), 1);
                    assert_eq!(ffi::Py_REFCNT(obj_ptr), 2);
                }
                {
                    // Dropping the pool must have released the owned reference.
                    let _pool = py.new_pool();
                    assert_eq!(owned_object_count(), 0);
                    assert_eq!(ffi::Py_REFCNT(obj_ptr), 1);
                }
            }
        })
    }

    #[test]
    fn test_owned_nested() {
        Python::with_gil(|py| {
            let obj = get_object(py);
            // Ensure that obj does not get freed
            let _ref = obj.clone_ref(py);
            let obj_ptr = obj.as_ptr();

            unsafe {
                {
                    let _pool = py.new_pool();
                    assert_eq!(owned_object_count(), 0);

                    gil::register_owned(py, NonNull::new_unchecked(obj.into_ptr()));

                    assert_eq!(owned_object_count(), 1);
                    assert_eq!(ffi::Py_REFCNT(obj_ptr), 2);
                    {
                        // An inner pool only releases objects registered after
                        // its own creation.
                        let _pool = py.new_pool();
                        let obj = get_object(py);
                        gil::register_owned(py, NonNull::new_unchecked(obj.into_ptr()));
                        assert_eq!(owned_object_count(), 2);
                    }
                    assert_eq!(owned_object_count(), 1);
                }
                {
                    assert_eq!(owned_object_count(), 0);
                    assert_eq!(ffi::Py_REFCNT(obj_ptr), 1);
                }
            }
        });
    }

    #[test]
    fn test_pyobject_drop_with_gil_decreases_refcnt() {
        Python::with_gil(|py| {
            let obj = get_object(py);

            // Cloning the object registers an immediate incref (GIL held).
            let reference = obj.clone_ref(py);

            assert_eq!(obj.get_refcnt(py), 2);
            assert!(pool_inc_refs_does_not_contain(&obj));

            // With the GIL held, the drop decrefs immediately — nothing queued.
            drop(reference);
            assert_eq!(obj.get_refcnt(py), 1);
            assert!(pool_dec_refs_does_not_contain(&obj));
        });
    }

    #[test]
    #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
    fn test_pyobject_drop_without_gil_doesnt_decrease_refcnt() {
        let obj = Python::with_gil(|py| {
            let obj = get_object(py);
            let reference = obj.clone_ref(py);

            assert_eq!(obj.get_refcnt(py), 2);
            assert!(pool_inc_refs_does_not_contain(&obj));

            // Drop from a thread without the GIL: the decref must be queued,
            // not applied.
            std::thread::spawn(move || drop(reference)).join().unwrap();

            assert_eq!(obj.get_refcnt(py), 2);
            assert!(pool_dirty_with(
                vec![],
                vec![NonNull::new(obj.as_ptr()).unwrap()]
            ));

            obj
        });

        // Re-acquiring the GIL flushes the queued decref.
        Python::with_gil(|py| {
            assert_eq!(obj.get_refcnt(py), 1);
            let non_null = unsafe { NonNull::new_unchecked(obj.as_ptr()) };
            assert!(!POOL.pointer_ops.lock().0.contains(&non_null));
            assert!(!POOL.pointer_ops.lock().1.contains(&non_null));
        });
    }

    #[test]
    fn test_gil_counts() {
        // Check with_gil and GILPool both increase counts correctly
        let get_gil_count = || GIL_COUNT.with(|c| c.get());

        assert_eq!(get_gil_count(), 0);
        Python::with_gil(|_| {
            assert_eq!(get_gil_count(), 1);

            let pool = unsafe { GILPool::new() };
            assert_eq!(get_gil_count(), 2);

            let pool2 = unsafe { GILPool::new() };
            assert_eq!(get_gil_count(), 3);

            drop(pool);
            assert_eq!(get_gil_count(), 2);

            // Nested with_gil on a thread already holding the GIL does not
            // create a new guard (acquire returns None), so the count stays.
            Python::with_gil(|_| {
                assert_eq!(get_gil_count(), 2);
            });
            assert_eq!(get_gil_count(), 2);

            drop(pool2);
            assert_eq!(get_gil_count(), 1);
        });
        assert_eq!(get_gil_count(), 0);
    }

    #[test]
    fn test_allow_threads() {
        assert!(!gil_is_acquired());

        Python::with_gil(|py| {
            assert!(gil_is_acquired());

            py.allow_threads(move || {
                assert!(!gil_is_acquired());

                // Re-acquiring inside allow_threads works.
                Python::with_gil(|_| assert!(gil_is_acquired()));

                assert!(!gil_is_acquired());
            });

            assert!(gil_is_acquired());
        });

        assert!(!gil_is_acquired());
    }

    #[test]
    fn test_allow_threads_updates_refcounts() {
        Python::with_gil(|py| {
            // Make a simple object with 1 reference
            let obj = get_object(py);
            assert!(obj.get_refcnt(py) == 1);
            // Clone the object without the GIL to use internal tracking;
            // returning from allow_threads flushes the queued incref.
            let escaped_ref = py.allow_threads(|| obj.clone());
            assert!(obj.get_refcnt(py) == 2);
            // Drop it with the GIL
            drop(escaped_ref);
            assert!(obj.get_refcnt(py) == 1);
            // Drop it without the GIL
            drop(obj);
        });
    }

    #[test]
    fn dropping_gil_does_not_invalidate_references() {
        // Acquiring GIL for the second time should be safe - see #864
        Python::with_gil(|py| {
            let obj = Python::with_gil(|_| py.eval("object()", None, None).unwrap());

            // After gil2 drops, obj should still have a reference count of one
            assert_eq!(obj.get_refcnt(), 1);
        })
    }

    #[test]
    fn test_clone_with_gil() {
        Python::with_gil(|py| {
            let obj = get_object(py);
            let count = obj.get_refcnt(py);

            // Cloning with the gil should increase the refcount immediately
            #[allow(clippy::redundant_clone)]
            let c = obj.clone();
            assert_eq!(count + 1, c.get_refcnt(py));
        })
    }

    // Simple one-shot event for cross-thread sequencing in the tests below.
    #[cfg(not(target_arch = "wasm32"))]
    struct Event {
        set: Mutex<bool>,
        wait: Condvar,
    }

    #[cfg(not(target_arch = "wasm32"))]
    impl Event {
        const fn new() -> Self {
            Self {
                set: const_mutex(false),
                wait: Condvar::new(),
            }
        }

        fn set(&self) {
            *self.set.lock() = true;
            self.wait.notify_all();
        }

        fn wait(&self) {
            let mut set = self.set.lock();
            while !*set {
                self.wait.wait(&mut set);
            }
        }
    }

    #[test]
    #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
    fn test_clone_without_gil() {
        use crate::{Py, PyAny};
        use std::{sync::Arc, thread};

        // Events for synchronizing between the two threads
        static GIL_ACQUIRED: Event = Event::new();
        static OBJECT_CLONED: Event = Event::new();
        static REFCNT_CHECKED: Event = Event::new();

        Python::with_gil(|py| {
            let obj: Arc<Py<PyAny>> = Arc::new(get_object(py));
            let thread_obj = Arc::clone(&obj);

            let count = obj.get_refcnt(py);
            println!(
                "1: The object has been created and its reference count is {}",
                count
            );

            let handle = thread::spawn(move || {
                Python::with_gil(move |py| {
                    println!("3. The GIL has been acquired on another thread.");
                    GIL_ACQUIRED.set();

                    // Wait while the main thread registers obj in POOL
                    OBJECT_CLONED.wait();
                    println!("5. Checking refcnt");
                    // The clone was queued, not applied — count is unchanged.
                    assert_eq!(thread_obj.get_refcnt(py), count);

                    REFCNT_CHECKED.set();
                })
            });

            let cloned = py.allow_threads(|| {
                println!("2. The GIL has been released.");

                // Wait until the GIL has been acquired on the thread.
                GIL_ACQUIRED.wait();

                println!("4. The other thread is now hogging the GIL, we clone without it held");

                // Cloning without GIL should not update reference count
                let cloned = Py::clone(&*obj);
                OBJECT_CLONED.set();
                cloned
            });

            REFCNT_CHECKED.wait();

            println!("6. The main thread has acquired the GIL again and processed the pool.");

            // Total reference count should be one higher
            assert_eq!(obj.get_refcnt(py), count + 1);

            // Clone dropped
            drop(cloned);
            // Ensure refcount of the arc is 1
            handle.join().unwrap();

            // Overall count is now back to the original, and should be no pending change
            assert_eq!(Arc::try_unwrap(obj).unwrap().get_refcnt(py), count);
        });
    }

    #[test]
    #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
    fn test_clone_in_other_thread() {
        use crate::Py;
        use std::{sync::Arc, thread};

        // Event for synchronizing between the two threads
        static OBJECT_CLONED: Event = Event::new();

        let (obj, count, ptr) = Python::with_gil(|py| {
            let obj = Arc::new(get_object(py));
            let count = obj.get_refcnt(py);
            let thread_obj = Arc::clone(&obj);

            // Start a thread which does not have the GIL, and clone it
            let t = thread::spawn(move || {
                // Cloning without GIL should not update reference count
                #[allow(clippy::redundant_clone)]
                let _ = Py::clone(&*thread_obj);
                OBJECT_CLONED.set();
            });

            OBJECT_CLONED.wait();
            assert_eq!(count, obj.get_refcnt(py));

            t.join().unwrap();
            let ptr = NonNull::new(obj.as_ptr()).unwrap();

            // The pointer should appear once in the incref pool, and once in the
            // decref pool (for the clone being created and also dropped)
            assert!(POOL.pointer_ops.lock().0.contains(&ptr));
            assert!(POOL.pointer_ops.lock().1.contains(&ptr));

            (obj, count, ptr)
        });

        Python::with_gil(|py| {
            // Acquiring the gil clears the pool
            assert!(!POOL.pointer_ops.lock().0.contains(&ptr));
            assert!(!POOL.pointer_ops.lock().1.contains(&ptr));

            // Overall count is still unchanged
            assert_eq!(count, obj.get_refcnt(py));
        });
    }

    #[test]
    fn test_update_counts_does_not_deadlock() {
        // update_counts can run arbitrary Python code during Py_DECREF.
        // if the locking is implemented incorrectly, it will deadlock.

        Python::with_gil(|py| {
            let obj = get_object(py);

            unsafe extern "C" fn capsule_drop(capsule: *mut ffi::PyObject) {
                // This line will implicitly call update_counts
                // -> and so cause deadlock if update_counts is not handling
                // this case correctly.
                let pool = GILPool::new();

                // Rebuild obj so that it can be dropped
                PyObject::from_owned_ptr(
                    pool.python(),
                    ffi::PyCapsule_GetPointer(capsule, std::ptr::null()) as _,
                );
            }

            let ptr = obj.into_ptr();

            let capsule =
                unsafe { ffi::PyCapsule_New(ptr as _, std::ptr::null(), Some(capsule_drop)) };

            POOL.register_decref(NonNull::new(capsule).unwrap());

            // Updating the counts will call decref on the capsule, which calls capsule_drop
            POOL.update_counts(py);
        })
    }
}