alloc/sync.rs

#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.

use core::any::Any;
#[cfg(not(no_global_oom_handling))]
use core::clone::CloneToUninit;
use core::clone::UseCloned;
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unsize};
use core::mem::{self, ManuallyDrop, align_of_val_raw};
use core::num::NonZeroUsize;
use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::pin::{Pin, PinCoerceUnsized};
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::sync::atomic::{self, Atomic};
use core::{borrow, fmt, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{AllocError, Allocator, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
/// Trying to go above it might instead `panic` (without actually going above it).
///
/// This is a global invariant, and also applies when using a compare-exchange loop.
///
/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";

#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false positive
// reports in Arc / Weak implementation use atomic loads for synchronization
// instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you do need to mutate through an `Arc`, you have several options:
///
/// 1. Use interior mutability with synchronization primitives like [`Mutex`][mutex],
///    [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
///
/// 2. Use clone-on-write semantics with [`Arc::make_mut`] which provides efficient mutation
///    without requiring interior mutability. This approach clones the data only when
///    needed (when there are multiple references) and can be more efficient when mutations
///    are infrequent.
///
/// 3. Use [`Arc::get_mut`] when you know your `Arc` is not shared (has a reference count of 1),
///    which provides direct mutable access to the inner value without any cloning.
///
/// ```
/// use std::sync::Arc;
///
/// let mut data = Arc::new(vec![1, 2, 3]);
///
/// // This will clone the vector only if there are other references to it
/// Arc::make_mut(&mut data).push(4);
///
/// assert_eq!(*data, vec![1, 2, 3, 4]);
/// ```
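///
/// And a minimal sketch of option 3, using [`Arc::get_mut`] while the
/// reference count is still 1:
///
/// ```
/// use std::sync::Arc;
///
/// let mut data = Arc::new(5);
///
/// // No other `Arc` or `Weak` pointers exist, so we get direct mutable access.
/// *Arc::get_mut(&mut data).unwrap() += 1;
///
/// assert_eq!(*data, 6);
/// ```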
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers, which includes all platforms that support
/// the `std` crate but not all those which only support [`alloc`](crate).
/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
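///
/// For example, a sketch of sharing a mutable counter across threads by
/// pairing `Arc` with a `Mutex`:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```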
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
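///
/// A sketch of that parent/child pattern, with a `Node` type defined here
/// purely for illustration:
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
///     children: Mutex<Vec<Arc<Node>>>,
/// }
///
/// let parent = Arc::new(Node {
///     parent: Mutex::new(Weak::new()),
///     children: Mutex::new(Vec::new()),
/// });
/// let child = Arc::new(Node {
///     parent: Mutex::new(Arc::downgrade(&parent)),
///     children: Mutex::new(Vec::new()),
/// });
/// parent.children.lock().unwrap().push(Arc::clone(&child));
///
/// // The child can reach its parent, but does not keep it alive.
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// ```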
///
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(());
/// let my_weak = Arc::downgrade(&my_arc);
/// ```
///
/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
/// fully qualified syntax. Some people prefer to use fully qualified syntax,
/// while others prefer using method-call syntax.
///
/// ```
/// use std::sync::Arc;
///
/// let arc = Arc::new(());
/// // Method-call syntax
/// let arc2 = arc.clone();
/// // Fully qualified syntax
/// let arc3 = Arc::clone(&arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::Relaxed);
///         println!("{v:?}");
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[doc(search_unbox)]
#[rustc_diagnostic_item = "Arc"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct Arc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}

#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
}

impl<T: ?Sized, A: Allocator> Arc<T, A> {
    #[inline]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, phantom: PhantomData, alloc }
    }

    #[inline]
    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation.
///
/// The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
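/// # Examples
///
/// A minimal sketch of downgrading and upgrading:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(5);
/// let weak = Arc::downgrade(&strong);
///
/// // While a strong reference exists, `upgrade` succeeds.
/// assert_eq!(weak.upgrade().as_deref(), Some(&5));
///
/// // Once the last `Arc` is dropped, the value is dropped and `upgrade` fails.
/// drop(strong);
/// assert!(weak.upgrade().is_none());
/// ```
///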
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
#[rustc_diagnostic_item = "ArcWeak"]
pub struct Weak<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because ArcInner has alignment at least 2.
    // This is only possible when `T: Sized`; unsized `T` never dangle.
    ptr: NonNull<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

// This is repr(C) to future-proof against possible field-reordering, which
// would interfere with otherwise safe [into|from]_raw() of transmutable
// inner types.
#[repr(C)]
struct ArcInner<T: ?Sized> {
    strong: Atomic<usize>,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: Atomic<usize>,

    data: T,
}

/// Calculate layout for `ArcInner<T>` using the inner value's layout
fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
    // Calculate layout using the given value layout.
    // Previously, layout was calculated on the expression
    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
    // reference (see #54908).
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        });
        unsafe { Self::from_inner(Box::leak(x).into()) }
    }

    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
    /// then calls your closure, giving it a `Weak<T>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// ```
    /// # #![allow(dead_code)]
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Gadget {
    ///     me: Weak<Gadget>,
    /// }
    ///
    /// impl Gadget {
    ///     /// Constructs a reference counted Gadget.
    ///     fn new() -> Arc<Self> {
    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
    ///         // `Arc` we're constructing.
    ///         Arc::new_cyclic(|me| {
    ///             // Create the actual struct here.
    ///             Gadget { me: me.clone() }
    ///         })
    ///     }
    ///
    ///     /// Returns a reference counted pointer to Self.
    ///     fn me(&self) -> Arc<Self> {
    ///         self.me.upgrade().unwrap()
    ///     }
    /// }
    /// ```
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
    where
        F: FnOnce(&Weak<T>) -> T,
    {
        Self::new_cyclic_in(data_fn, Global)
    }

    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_zeroed_alloc)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::new_zeroed();
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "new_zeroed_alloc", issue = "129396")]
    #[must_use]
    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
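    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::pin::Pin;
    ///
    /// let pinned: Pin<Arc<u32>> = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```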
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "pin", since = "1.33.0")]
    #[must_use]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
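    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::try_pin(5)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```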
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
    }

    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_new(5)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::try_new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        })?;
        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
    }

    /// Constructs a new `Arc` with uninitialized contents, returning an error
    /// if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::try_new_uninit()?;
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, returning an error if allocation fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::try_new_zeroed()?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            )?))
        }
    }
}

impl<T, A: Allocator> Arc<T, A> {
    /// Constructs a new `Arc<T>` in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::new_in(5, System);
    /// ```
    #[inline]
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        );
        let (ptr, alloc) = Box::into_unique(x);
        unsafe { Self::from_inner_in(ptr.into(), alloc) }
    }

    /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::new_uninit_in(System);
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::new_zeroed_in(System);
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T, A>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
    /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T, A>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// See [`new_cyclic`]
    ///
    /// [`new_cyclic`]: Arc::new_cyclic
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
    where
        F: FnOnce(&Weak<T, A>) -> T,
    {
        // Construct the inner in the "uninitialized" state with a single
        // weak reference.
        let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                weak: atomic::AtomicUsize::new(1),
                data: mem::MaybeUninit::<T>::uninit(),
            },
            alloc,
        ));
        let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();

        let weak = Weak { ptr: init_ptr, alloc };

        // It's important we don't give up ownership of the weak pointer, or
        // else the memory might be freed by the time `data_fn` returns. If
        // we really wanted to pass ownership, we could create an additional
        // weak pointer for ourselves, but this would result in additional
        // updates to the weak reference count which might not be necessary
        // otherwise.
        let data = data_fn(&weak);

        // Now we can properly initialize the inner value and turn our weak
        // reference into a strong reference.
        let strong = unsafe {
            let inner = init_ptr.as_ptr();
            ptr::write(&raw mut (*inner).data, data);

            // The above write to the data field must be visible to any threads which
            // observe a non-zero strong count. Therefore we need at least "Release" ordering
            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
            //
            // "Acquire" ordering is not required. When considering the possible behaviors
            // of `data_fn` we only need to look at what it could do with a reference to a
            // non-upgradeable `Weak`:
            // - It can *clone* the `Weak`, increasing the weak reference count.
            // - It can drop those clones, decreasing the weak reference count (but never to zero).
            //
            // These side effects do not impact us in any way, and no other side effects are
            // possible with safe code alone.
            let prev_value = (*inner).strong.fetch_add(1, Release);
            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");

            // Strong references should collectively own a shared weak reference,
            // so don't run the destructor for our old weak reference.
            // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
            // and forgetting the weak reference.
            let alloc = weak.into_raw_with_allocator().1;

            Arc::from_inner_in(init_ptr, alloc)
        };

        strong
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
    /// then `data` will be pinned in memory and unable to be moved.
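    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let pinned = Arc::pin_in(5, System);
    /// assert_eq!(*pinned, 5);
    /// ```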
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
    where
        A: 'static,
    {
        unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if
    /// allocation fails.
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
    where
        A: 'static,
    {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
    }

    /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::try_new_in(5, System)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::try_new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        )?;
        let (ptr, alloc) = Box::into_unique(x);
        Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
    }

    /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
    /// error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
    /// fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
    /// keep the `Arc` in the [`Err`] case.
    /// Immediately dropping the [`Err`]-value, as the expression
    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
    /// drop to zero and the inner value of the `Arc` to be dropped.
    /// For instance, if two threads execute such an expression in parallel,
    /// there is a race condition without the possibility of unsafety:
    /// The threads could first both check whether they own the last instance
    /// in `Arc::try_unwrap`, determine that they both do not, and then both
    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
    /// In this scenario, the value inside the `Arc` is safely destroyed
    /// by exactly one of the threads, but neither thread will ever be able
    /// to use the value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        let this = ManuallyDrop::new(this);
        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
        let alloc: A = unsafe { ptr::read(&this.alloc) }; // copy the allocator

        // Make a weak pointer to clean up the implicit strong-weak reference
        let _weak = Weak { ptr: this.ptr, alloc };

        Ok(elem)
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// If `Arc::into_inner` is called on every clone of this `Arc`,
    /// it is guaranteed that exactly one of the calls returns the inner value.
    /// This means in particular that the inner value is not dropped.
    ///
    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
    /// is meant for different use-cases. If used as a direct replacement
    /// for `Arc::into_inner` anyway, such as with the expression
    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
    /// **not** give the same guarantee as described in the previous paragraph.
    /// For more information, see the examples below and read the documentation
    /// of [`Arc::try_unwrap`].
    ///
    /// # Examples
    ///
    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
    ///
    /// let x_inner_value = x_thread.join().unwrap();
    /// let y_inner_value = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!(matches!(
    ///     (x_inner_value, y_inner_value),
    ///     (None, Some(3)) | (Some(3), None)
    /// ));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
    /// ```
    ///
    /// A more practical example demonstrating the need for `Arc::into_inner`:
    /// ```
    /// use std::sync::Arc;
    ///
    /// // Definition of a simple singly linked list using `Arc`:
    /// #[derive(Clone)]
    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
    /// struct Node<T>(T, Option<Arc<Node<T>>>);
    ///
    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
    /// // can cause a stack overflow. To prevent this, we can provide a
    /// // manual `Drop` implementation that does the destruction in a loop:
    /// impl<T> Drop for LinkedList<T> {
    ///     fn drop(&mut self) {
    ///         let mut link = self.0.take();
    ///         while let Some(arc_node) = link.take() {
    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
    ///                 link = next;
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Implementation of `new` and `push` omitted
    /// impl<T> LinkedList<T> {
    ///     /* ... */
    /// #   fn new() -> Self {
    /// #       LinkedList(None)
    /// #   }
    /// #   fn push(&mut self, x: T) {
    /// #       self.0 = Some(Arc::new(Node(x, self.0.take())));
    /// #   }
    /// }
    ///
    /// // The following code could have still caused a stack overflow
    /// // despite the manual `Drop` impl if that `Drop` impl had used
    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
    ///
    /// // Create a long list and clone it
    /// let mut x = LinkedList::new();
    /// let size = 100000;
    /// # let size = if cfg!(miri) { 100 } else { size };
    /// for i in 0..size {
    ///     x.push(i); // Adds i to the front of x
    /// }
    /// let y = x.clone();
    ///
    /// // Drop the clones in parallel
    /// let x_thread = std::thread::spawn(|| drop(x));
    /// let y_thread = std::thread::spawn(|| drop(y));
    /// x_thread.join().unwrap();
    /// y_thread.join().unwrap();
    /// ```
    #[inline]
    #[stable(feature = "arc_into_inner", since = "1.70.0")]
    pub fn into_inner(this: Self) -> Option<T> {
        // Make sure that the ordinary `Drop` implementation isn’t called as well
        let mut this = mem::ManuallyDrop::new(this);

        // Following the implementation of `drop` and `drop_slow`
        if this.inner().strong.fetch_sub(1, Release) != 1 {
            return None;
        }

        acquire!(this.inner().strong);

        // SAFETY: This mirrors the line
        //
        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
        //
        // in `drop_slow`. Instead of dropping the value behind the pointer,
        // it is read and eventually returned; `ptr::read` has the same
        // safety conditions as `ptr::drop_in_place`.

        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
        let alloc = unsafe { ptr::read(&this.alloc) };

        drop(Weak { ptr: this.ptr, alloc });

        Some(inner)
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_zeroed_alloc)]
    ///
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "new_zeroed_alloc", issue = "129396")]
    #[must_use]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }

    /// Converts the reference-counted slice into a reference-counted array.
    ///
    /// This operation does not reallocate; the underlying array of the slice is simply reinterpreted as an array type.
    ///
    /// If `N` is not exactly equal to the length of `self`, then this method returns `None`.
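    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(slice_as_array)]
    ///
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[u32]> = Arc::new([1, 2, 3]);
    ///
    /// let array: Arc<[u32; 3]> = Arc::into_array(slice).unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```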
1234    #[unstable(feature = "slice_as_array", issue = "133508")]
1235    #[inline]
1236    #[must_use]
1237    pub fn into_array<const N: usize>(self) -> Option<Arc<[T; N]>> {
1238        if self.len() == N {
1239            let ptr = Self::into_raw(self) as *const [T; N];
1240
1241            // SAFETY: The underlying array of a slice has the exact same layout as an actual array `[T; N]` if `N` is equal to the slice's length.
1242            let me = unsafe { Arc::from_raw(ptr) };
1243            Some(me)
1244        } else {
1245            None
1246        }
1247    }
1248}
1249
1250impl<T, A: Allocator> Arc<[T], A> {
1251    /// Constructs a new atomically reference-counted slice with uninitialized contents in the
1252    /// provided allocator.
1253    ///
1254    /// # Examples
1255    ///
1256    /// ```
1257    /// #![feature(get_mut_unchecked)]
1258    /// #![feature(allocator_api)]
1259    ///
1260    /// use std::sync::Arc;
1261    /// use std::alloc::System;
1262    ///
1263    /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
1264    ///
1265    /// let values = unsafe {
1266    ///     // Deferred initialization:
1267    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
1268    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
1269    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
1270    ///
1271    ///     values.assume_init()
1272    /// };
1273    ///
1274    /// assert_eq!(*values, [1, 2, 3])
1275    /// ```
1276    #[cfg(not(no_global_oom_handling))]
1277    #[unstable(feature = "allocator_api", issue = "32838")]
1278    #[inline]
1279    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1280        unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
1281    }
1282
1283    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
1284    /// filled with `0` bytes, in the provided allocator.
1285    ///
1286    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
1287    /// incorrect usage of this method.
1288    ///
1289    /// # Examples
1290    ///
1291    /// ```
1292    /// #![feature(allocator_api)]
1293    ///
1294    /// use std::sync::Arc;
1295    /// use std::alloc::System;
1296    ///
1297    /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
1298    /// let values = unsafe { values.assume_init() };
1299    ///
1300    /// assert_eq!(*values, [0, 0, 0])
1301    /// ```
1302    ///
1303    /// [zeroed]: mem::MaybeUninit::zeroed
1304    #[cfg(not(no_global_oom_handling))]
1305    #[unstable(feature = "allocator_api", issue = "32838")]
1306    #[inline]
1307    pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1308        unsafe {
1309            Arc::from_ptr_in(
1310                Arc::allocate_for_layout(
1311                    Layout::array::<T>(len).unwrap(),
1312                    |layout| alloc.allocate_zeroed(layout),
1313                    |mem| {
1314                        ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
1315                            as *mut ArcInner<[mem::MaybeUninit<T>]>
1316                    },
1317                ),
1318                alloc,
1319            )
1320        }
1321    }
1322}
1323
1324impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
1325    /// Converts to `Arc<T>`.
1326    ///
1327    /// # Safety
1328    ///
1329    /// As with [`MaybeUninit::assume_init`],
1330    /// it is up to the caller to guarantee that the inner value
1331    /// really is in an initialized state.
1332    /// Calling this when the content is not yet fully initialized
1333    /// causes immediate undefined behavior.
1334    ///
1335    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1336    ///
1337    /// # Examples
1338    ///
1339    /// ```
1340    /// #![feature(get_mut_unchecked)]
1341    ///
1342    /// use std::sync::Arc;
1343    ///
1344    /// let mut five = Arc::<u32>::new_uninit();
1345    ///
1346    /// // Deferred initialization:
1347    /// Arc::get_mut(&mut five).unwrap().write(5);
1348    ///
1349    /// let five = unsafe { five.assume_init() };
1350    ///
1351    /// assert_eq!(*five, 5)
1352    /// ```
1353    #[stable(feature = "new_uninit", since = "1.82.0")]
1354    #[must_use = "`self` will be dropped if the result is not used"]
1355    #[inline]
1356    pub unsafe fn assume_init(self) -> Arc<T, A> {
1357        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
1358        unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
1359    }
1360}
1361
1362impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
1363    /// Converts to `Arc<[T]>`.
1364    ///
1365    /// # Safety
1366    ///
1367    /// As with [`MaybeUninit::assume_init`],
1368    /// it is up to the caller to guarantee that the inner value
1369    /// really is in an initialized state.
1370    /// Calling this when the content is not yet fully initialized
1371    /// causes immediate undefined behavior.
1372    ///
1373    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1374    ///
1375    /// # Examples
1376    ///
1377    /// ```
1378    /// #![feature(get_mut_unchecked)]
1379    ///
1380    /// use std::sync::Arc;
1381    ///
1382    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
1383    ///
1384    /// // Deferred initialization:
1385    /// let data = Arc::get_mut(&mut values).unwrap();
1386    /// data[0].write(1);
1387    /// data[1].write(2);
1388    /// data[2].write(3);
1389    ///
1390    /// let values = unsafe { values.assume_init() };
1391    ///
1392    /// assert_eq!(*values, [1, 2, 3])
1393    /// ```
1394    #[stable(feature = "new_uninit", since = "1.82.0")]
1395    #[must_use = "`self` will be dropped if the result is not used"]
1396    #[inline]
1397    pub unsafe fn assume_init(self) -> Arc<[T], A> {
1398        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
1399        unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
1400    }
1401}
1402
1403impl<T: ?Sized> Arc<T> {
1404    /// Constructs an `Arc<T>` from a raw pointer.
1405    ///
1406    /// The raw pointer must have been previously returned by a call to
1407    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
1408    ///
1409    /// * If `U` is sized, it must have the same size and alignment as `T`. This
1410    ///   is trivially true if `U` is `T`.
1411    /// * If `U` is unsized, its data pointer must have the same size and
1412    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
1413    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1414    ///   coercion].
1415    ///
1416    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1417    /// and alignment, this is basically like transmuting references of
1418    /// different types. See [`mem::transmute`][transmute] for more information
1419    /// on what restrictions apply in this case.
1420    ///
1421    /// The raw pointer must point to a block of memory allocated by the global allocator.
1422    ///
1423    /// The user of `from_raw` has to make sure a specific value of `T` is only
1424    /// dropped once.
1425    ///
1426    /// This function is unsafe because improper use may lead to memory unsafety,
1427    /// even if the returned `Arc<T>` is never accessed.
1428    ///
1429    /// [into_raw]: Arc::into_raw
1430    /// [transmute]: core::mem::transmute
1431    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1432    ///
1433    /// # Examples
1434    ///
1435    /// ```
1436    /// use std::sync::Arc;
1437    ///
1438    /// let x = Arc::new("hello".to_owned());
1439    /// let x_ptr = Arc::into_raw(x);
1440    ///
1441    /// unsafe {
1442    ///     // Convert back to an `Arc` to prevent leak.
1443    ///     let x = Arc::from_raw(x_ptr);
1444    ///     assert_eq!(&*x, "hello");
1445    ///
1446    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1447    /// }
1448    ///
1449    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1450    /// ```
1451    ///
1452    /// Convert a slice back into its original array:
1453    ///
1454    /// ```
1455    /// use std::sync::Arc;
1456    ///
1457    /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
1458    /// let x_ptr: *const [u32] = Arc::into_raw(x);
1459    ///
1460    /// unsafe {
1461    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
1462    ///     assert_eq!(&*x, &[1, 2, 3]);
1463    /// }
1464    /// ```
1465    #[inline]
1466    #[stable(feature = "rc_raw", since = "1.17.0")]
1467    pub unsafe fn from_raw(ptr: *const T) -> Self {
1468        unsafe { Arc::from_raw_in(ptr, Global) }
1469    }
1470
1471    /// Increments the strong reference count on the `Arc<T>` associated with the
1472    /// provided pointer by one.
1473    ///
1474    /// # Safety
1475    ///
1476    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1477    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1478    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1479    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1480    /// allocated by the global allocator.
1481    ///
1482    /// [from_raw_in]: Arc::from_raw_in
1483    ///
1484    /// # Examples
1485    ///
1486    /// ```
1487    /// use std::sync::Arc;
1488    ///
1489    /// let five = Arc::new(5);
1490    ///
1491    /// unsafe {
1492    ///     let ptr = Arc::into_raw(five);
1493    ///     Arc::increment_strong_count(ptr);
1494    ///
1495    ///     // This assertion is deterministic because we haven't shared
1496    ///     // the `Arc` between threads.
1497    ///     let five = Arc::from_raw(ptr);
1498    ///     assert_eq!(2, Arc::strong_count(&five));
1499    /// #   // Prevent leaks for Miri.
1500    /// #   Arc::decrement_strong_count(ptr);
1501    /// }
1502    /// ```
1503    #[inline]
1504    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1505    pub unsafe fn increment_strong_count(ptr: *const T) {
1506        unsafe { Arc::increment_strong_count_in(ptr, Global) }
1507    }
1508
1509    /// Decrements the strong reference count on the `Arc<T>` associated with the
1510    /// provided pointer by one.
1511    ///
1512    /// # Safety
1513    ///
1514    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1515    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1516    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1517    /// least 1) when invoking this method, and `ptr` must point to a block of memory
1518    /// allocated by the global allocator. This method can be used to release the final
1519    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1520    /// released.
1521    ///
1522    /// [from_raw_in]: Arc::from_raw_in
1523    ///
1524    /// # Examples
1525    ///
1526    /// ```
1527    /// use std::sync::Arc;
1528    ///
1529    /// let five = Arc::new(5);
1530    ///
1531    /// unsafe {
1532    ///     let ptr = Arc::into_raw(five);
1533    ///     Arc::increment_strong_count(ptr);
1534    ///
    ///     // These assertions are deterministic because we haven't shared
1536    ///     // the `Arc` between threads.
1537    ///     let five = Arc::from_raw(ptr);
1538    ///     assert_eq!(2, Arc::strong_count(&five));
1539    ///     Arc::decrement_strong_count(ptr);
1540    ///     assert_eq!(1, Arc::strong_count(&five));
1541    /// }
1542    /// ```
1543    #[inline]
1544    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1545    pub unsafe fn decrement_strong_count(ptr: *const T) {
1546        unsafe { Arc::decrement_strong_count_in(ptr, Global) }
1547    }
1548}
1549
1550impl<T: ?Sized, A: Allocator> Arc<T, A> {
1551    /// Returns a reference to the underlying allocator.
1552    ///
1553    /// Note: this is an associated function, which means that you have
1554    /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
1555    /// is so that there is no conflict with a method on the inner type.
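    ///
    /// # Examples
    ///
    /// A minimal sketch, using the unstable `allocator_api` feature:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let a = Arc::new_in("hello".to_owned(), System);
    /// let _alloc: &System = Arc::allocator(&a);
    /// ```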
1556    #[inline]
1557    #[unstable(feature = "allocator_api", issue = "32838")]
1558    pub fn allocator(this: &Self) -> &A {
1559        &this.alloc
1560    }
1561
1562    /// Consumes the `Arc`, returning the wrapped pointer.
1563    ///
1564    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1565    /// [`Arc::from_raw`].
1566    ///
1567    /// # Examples
1568    ///
1569    /// ```
1570    /// use std::sync::Arc;
1571    ///
1572    /// let x = Arc::new("hello".to_owned());
1573    /// let x_ptr = Arc::into_raw(x);
1574    /// assert_eq!(unsafe { &*x_ptr }, "hello");
1575    /// # // Prevent leaks for Miri.
1576    /// # drop(unsafe { Arc::from_raw(x_ptr) });
1577    /// ```
1578    #[must_use = "losing the pointer will leak memory"]
1579    #[stable(feature = "rc_raw", since = "1.17.0")]
1580    #[rustc_never_returns_null_ptr]
1581    pub fn into_raw(this: Self) -> *const T {
1582        let this = ManuallyDrop::new(this);
1583        Self::as_ptr(&*this)
1584    }
1585
1586    /// Consumes the `Arc`, returning the wrapped pointer and allocator.
1587    ///
1588    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1589    /// [`Arc::from_raw_in`].
1590    ///
1591    /// # Examples
1592    ///
1593    /// ```
1594    /// #![feature(allocator_api)]
1595    /// use std::sync::Arc;
1596    /// use std::alloc::System;
1597    ///
1598    /// let x = Arc::new_in("hello".to_owned(), System);
1599    /// let (ptr, alloc) = Arc::into_raw_with_allocator(x);
1600    /// assert_eq!(unsafe { &*ptr }, "hello");
1601    /// let x = unsafe { Arc::from_raw_in(ptr, alloc) };
1602    /// assert_eq!(&*x, "hello");
1603    /// ```
1604    #[must_use = "losing the pointer will leak memory"]
1605    #[unstable(feature = "allocator_api", issue = "32838")]
1606    pub fn into_raw_with_allocator(this: Self) -> (*const T, A) {
1607        let this = mem::ManuallyDrop::new(this);
1608        let ptr = Self::as_ptr(&this);
1609        // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
1610        let alloc = unsafe { ptr::read(&this.alloc) };
1611        (ptr, alloc)
1612    }
1613
1614    /// Provides a raw pointer to the data.
1615    ///
    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
    /// as long as there is at least one strong reference keeping the allocation alive.
1618    ///
1619    /// # Examples
1620    ///
1621    /// ```
1622    /// use std::sync::Arc;
1623    ///
1624    /// let x = Arc::new("hello".to_owned());
1625    /// let y = Arc::clone(&x);
1626    /// let x_ptr = Arc::as_ptr(&x);
1627    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1628    /// assert_eq!(unsafe { &*x_ptr }, "hello");
1629    /// ```
1630    #[must_use]
1631    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1632    #[rustc_never_returns_null_ptr]
1633    pub fn as_ptr(this: &Self) -> *const T {
1634        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1635
1636        // SAFETY: This cannot go through Deref::deref or RcInnerPtr::inner because
1637        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
1638        // write through the pointer after the Rc is recovered through `from_raw`.
1639        unsafe { &raw mut (*ptr).data }
1640    }
1641
1642    /// Constructs an `Arc<T, A>` from a raw pointer.
1643    ///
1644    /// The raw pointer must have been previously returned by a call to [`Arc<U,
1645    /// A>::into_raw`][into_raw] with the following requirements:
1646    ///
1647    /// * If `U` is sized, it must have the same size and alignment as `T`. This
1648    ///   is trivially true if `U` is `T`.
1649    /// * If `U` is unsized, its data pointer must have the same size and
1650    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
1651    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1652    ///   coercion].
1653    ///
1654    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1655    /// and alignment, this is basically like transmuting references of
1656    /// different types. See [`mem::transmute`][transmute] for more information
1657    /// on what restrictions apply in this case.
1658    ///
    /// The raw pointer must point to a block of memory allocated by `alloc`.
1660    ///
1661    /// The user of `from_raw` has to make sure a specific value of `T` is only
1662    /// dropped once.
1663    ///
1664    /// This function is unsafe because improper use may lead to memory unsafety,
1665    /// even if the returned `Arc<T>` is never accessed.
1666    ///
1667    /// [into_raw]: Arc::into_raw
1668    /// [transmute]: core::mem::transmute
1669    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1670    ///
1671    /// # Examples
1672    ///
1673    /// ```
1674    /// #![feature(allocator_api)]
1675    ///
1676    /// use std::sync::Arc;
1677    /// use std::alloc::System;
1678    ///
1679    /// let x = Arc::new_in("hello".to_owned(), System);
1680    /// let x_ptr = Arc::into_raw(x);
1681    ///
1682    /// unsafe {
1683    ///     // Convert back to an `Arc` to prevent leak.
1684    ///     let x = Arc::from_raw_in(x_ptr, System);
1685    ///     assert_eq!(&*x, "hello");
1686    ///
    ///     // Further calls to `Arc::from_raw_in(x_ptr, System)` would be memory-unsafe.
1688    /// }
1689    ///
1690    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1691    /// ```
1692    ///
1693    /// Convert a slice back into its original array:
1694    ///
1695    /// ```
1696    /// #![feature(allocator_api)]
1697    ///
1698    /// use std::sync::Arc;
1699    /// use std::alloc::System;
1700    ///
1701    /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1702    /// let x_ptr: *const [u32] = Arc::into_raw(x);
1703    ///
1704    /// unsafe {
1705    ///     let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1706    ///     assert_eq!(&*x, &[1, 2, 3]);
1707    /// }
1708    /// ```
1709    #[inline]
1710    #[unstable(feature = "allocator_api", issue = "32838")]
1711    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1712        unsafe {
1713            let offset = data_offset(ptr);
1714
1715            // Reverse the offset to find the original ArcInner.
1716            let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1717
1718            Self::from_ptr_in(arc_ptr, alloc)
1719        }
1720    }
1721
1722    /// Creates a new [`Weak`] pointer to this allocation.
1723    ///
1724    /// # Examples
1725    ///
1726    /// ```
1727    /// use std::sync::Arc;
1728    ///
1729    /// let five = Arc::new(5);
1730    ///
1731    /// let weak_five = Arc::downgrade(&five);
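    ///
    /// // While `five` is alive, the weak pointer can be upgraded
    /// // back into a strong one.
    /// assert!(weak_five.upgrade().is_some());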
1732    /// ```
1733    #[must_use = "this returns a new `Weak` pointer, \
1734                  without modifying the original `Arc`"]
1735    #[stable(feature = "arc_weak", since = "1.4.0")]
1736    pub fn downgrade(this: &Self) -> Weak<T, A>
1737    where
1738        A: Clone,
1739    {
1740        // This Relaxed is OK because we're checking the value in the CAS
1741        // below.
1742        let mut cur = this.inner().weak.load(Relaxed);
1743
1744        loop {
1745            // check if the weak counter is currently "locked"; if so, spin.
1746            if cur == usize::MAX {
1747                hint::spin_loop();
1748                cur = this.inner().weak.load(Relaxed);
1749                continue;
1750            }
1751
1752            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1753            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1754
1755            // NOTE: this code currently ignores the possibility of overflow
1756            // into usize::MAX; in general both Rc and Arc need to be adjusted
1757            // to deal with overflow.
1758
1759            // Unlike with Clone(), we need this to be an Acquire read to
1760            // synchronize with the write coming from `is_unique`, so that the
1761            // events prior to that write happen before this read.
1762            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1763                Ok(_) => {
1764                    // Make sure we do not create a dangling Weak
1765                    debug_assert!(!is_dangling(this.ptr.as_ptr()));
1766                    return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1767                }
1768                Err(old) => cur = old,
1769            }
1770        }
1771    }
1772
1773    /// Gets the number of [`Weak`] pointers to this allocation.
1774    ///
1775    /// # Safety
1776    ///
1777    /// This method by itself is safe, but using it correctly requires extra care.
1778    /// Another thread can change the weak count at any time,
1779    /// including potentially between calling this method and acting on the result.
1780    ///
1781    /// # Examples
1782    ///
1783    /// ```
1784    /// use std::sync::Arc;
1785    ///
1786    /// let five = Arc::new(5);
1787    /// let _weak_five = Arc::downgrade(&five);
1788    ///
1789    /// // This assertion is deterministic because we haven't shared
1790    /// // the `Arc` or `Weak` between threads.
1791    /// assert_eq!(1, Arc::weak_count(&five));
1792    /// ```
1793    #[inline]
1794    #[must_use]
1795    #[stable(feature = "arc_counts", since = "1.15.0")]
1796    pub fn weak_count(this: &Self) -> usize {
1797        let cnt = this.inner().weak.load(Relaxed);
1798        // If the weak count is currently locked, the value of the
1799        // count was 0 just before taking the lock.
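        // Otherwise, subtract the implicit weak reference that is collectively
        // held by all strong references (see `drop_slow`).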
1800        if cnt == usize::MAX { 0 } else { cnt - 1 }
1801    }
1802
1803    /// Gets the number of strong (`Arc`) pointers to this allocation.
1804    ///
1805    /// # Safety
1806    ///
1807    /// This method by itself is safe, but using it correctly requires extra care.
1808    /// Another thread can change the strong count at any time,
1809    /// including potentially between calling this method and acting on the result.
1810    ///
1811    /// # Examples
1812    ///
1813    /// ```
1814    /// use std::sync::Arc;
1815    ///
1816    /// let five = Arc::new(5);
1817    /// let _also_five = Arc::clone(&five);
1818    ///
1819    /// // This assertion is deterministic because we haven't shared
1820    /// // the `Arc` between threads.
1821    /// assert_eq!(2, Arc::strong_count(&five));
1822    /// ```
1823    #[inline]
1824    #[must_use]
1825    #[stable(feature = "arc_counts", since = "1.15.0")]
1826    pub fn strong_count(this: &Self) -> usize {
1827        this.inner().strong.load(Relaxed)
1828    }
1829
1830    /// Increments the strong reference count on the `Arc<T>` associated with the
1831    /// provided pointer by one.
1832    ///
1833    /// # Safety
1834    ///
1835    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1836    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1837    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1838    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1839    /// allocated by `alloc`.
1840    ///
1841    /// [from_raw_in]: Arc::from_raw_in
1842    ///
1843    /// # Examples
1844    ///
1845    /// ```
1846    /// #![feature(allocator_api)]
1847    ///
1848    /// use std::sync::Arc;
1849    /// use std::alloc::System;
1850    ///
1851    /// let five = Arc::new_in(5, System);
1852    ///
1853    /// unsafe {
1854    ///     let ptr = Arc::into_raw(five);
1855    ///     Arc::increment_strong_count_in(ptr, System);
1856    ///
1857    ///     // This assertion is deterministic because we haven't shared
1858    ///     // the `Arc` between threads.
1859    ///     let five = Arc::from_raw_in(ptr, System);
1860    ///     assert_eq!(2, Arc::strong_count(&five));
1861    /// #   // Prevent leaks for Miri.
1862    /// #   Arc::decrement_strong_count_in(ptr, System);
1863    /// }
1864    /// ```
1865    #[inline]
1866    #[unstable(feature = "allocator_api", issue = "32838")]
1867    pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1868    where
1869        A: Clone,
1870    {
        // Retain the Arc without touching the refcount, by wrapping it in ManuallyDrop.
        let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
        // Now increase the refcount, but don't drop the new Arc either.
        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1875    }
1876
1877    /// Decrements the strong reference count on the `Arc<T>` associated with the
1878    /// provided pointer by one.
1879    ///
1880    /// # Safety
1881    ///
1882    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1883    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1884    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1885    /// least 1) when invoking this method, and `ptr` must point to a block of memory
1886    /// allocated by `alloc`. This method can be used to release the final
1887    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1888    /// released.
1889    ///
1890    /// [from_raw_in]: Arc::from_raw_in
1891    ///
1892    /// # Examples
1893    ///
1894    /// ```
1895    /// #![feature(allocator_api)]
1896    ///
1897    /// use std::sync::Arc;
1898    /// use std::alloc::System;
1899    ///
1900    /// let five = Arc::new_in(5, System);
1901    ///
1902    /// unsafe {
1903    ///     let ptr = Arc::into_raw(five);
1904    ///     Arc::increment_strong_count_in(ptr, System);
1905    ///
    ///     // These assertions are deterministic because we haven't shared
1907    ///     // the `Arc` between threads.
1908    ///     let five = Arc::from_raw_in(ptr, System);
1909    ///     assert_eq!(2, Arc::strong_count(&five));
1910    ///     Arc::decrement_strong_count_in(ptr, System);
1911    ///     assert_eq!(1, Arc::strong_count(&five));
1912    /// }
1913    /// ```
1914    #[inline]
1915    #[unstable(feature = "allocator_api", issue = "32838")]
1916    pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1917        unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1918    }
1919
1920    #[inline]
1921    fn inner(&self) -> &ArcInner<T> {
1922        // This unsafety is ok because while this arc is alive we're guaranteed
1923        // that the inner pointer is valid. Furthermore, we know that the
1924        // `ArcInner` structure itself is `Sync` because the inner data is
1925        // `Sync` as well, so we're ok loaning out an immutable pointer to these
1926        // contents.
1927        unsafe { self.ptr.as_ref() }
1928    }
1929
1930    // Non-inlined part of `drop`.
1931    #[inline(never)]
1932    unsafe fn drop_slow(&mut self) {
1933        // Drop the weak ref collectively held by all strong references when this
1934        // variable goes out of scope. This ensures that the memory is deallocated
1935        // even if the destructor of `T` panics.
1936        // Take a reference to `self.alloc` instead of cloning because 1. it'll last long
1937        // enough, and 2. you should be able to drop `Arc`s with unclonable allocators
1938        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
1939
1940        // Destroy the data at this time, even though we must not free the box
1941        // allocation itself (there might still be weak pointers lying around).
1942        // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
1943        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
1944    }
1945
    /// Returns `true` if the two `Arc`s point to the same allocation, in a manner similar to
    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
1948    ///
1949    /// # Examples
1950    ///
1951    /// ```
1952    /// use std::sync::Arc;
1953    ///
1954    /// let five = Arc::new(5);
1955    /// let same_five = Arc::clone(&five);
1956    /// let other_five = Arc::new(5);
1957    ///
1958    /// assert!(Arc::ptr_eq(&five, &same_five));
1959    /// assert!(!Arc::ptr_eq(&five, &other_five));
1960    /// ```
1961    ///
1962    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1963    #[inline]
1964    #[must_use]
1965    #[stable(feature = "ptr_eq", since = "1.17.0")]
1966    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
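        // `ptr::addr_eq` compares only the data addresses, ignoring any
        // `dyn Trait` vtable metadata, which gives the behavior documented above.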
1967        ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
1968    }
1969}
1970
1971impl<T: ?Sized> Arc<T> {
1972    /// Allocates an `ArcInner<T>` with sufficient space for
1973    /// a possibly-unsized inner value where the value has the layout provided.
1974    ///
1975    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
1977    #[cfg(not(no_global_oom_handling))]
1978    unsafe fn allocate_for_layout(
1979        value_layout: Layout,
1980        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1981        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1982    ) -> *mut ArcInner<T> {
1983        let layout = arcinner_layout_for_value_layout(value_layout);
1984
1985        let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
1986
1987        unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
1988    }
1989
1990    /// Allocates an `ArcInner<T>` with sufficient space for
1991    /// a possibly-unsized inner value where the value has the layout provided,
1992    /// returning an error if allocation fails.
1993    ///
1994    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
1996    unsafe fn try_allocate_for_layout(
1997        value_layout: Layout,
1998        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1999        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2000    ) -> Result<*mut ArcInner<T>, AllocError> {
2001        let layout = arcinner_layout_for_value_layout(value_layout);
2002
2003        let ptr = allocate(layout)?;
2004
2005        let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
2006
2007        Ok(inner)
2008    }
2009
2010    unsafe fn initialize_arcinner(
2011        ptr: NonNull<[u8]>,
2012        layout: Layout,
2013        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2014    ) -> *mut ArcInner<T> {
2015        let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
2016        debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
2017
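        // Both counts start at one: the strong count for the `Arc` being
        // created, and the weak count for the implicit weak reference
        // collectively held by all strong references (see `drop_slow`).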
2018        unsafe {
2019            (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
2020            (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
2021        }
2022
2023        inner
2024    }
2025}
2026
2027impl<T: ?Sized, A: Allocator> Arc<T, A> {
2028    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
2029    #[inline]
2030    #[cfg(not(no_global_oom_handling))]
2031    unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
2032        // Allocate for the `ArcInner<T>` using the given value.
2033        unsafe {
2034            Arc::allocate_for_layout(
2035                Layout::for_value_raw(ptr),
2036                |layout| alloc.allocate(layout),
2037                |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
2038            )
2039        }
2040    }
2041
2042    #[cfg(not(no_global_oom_handling))]
2043    fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
2044        unsafe {
2045            let value_size = size_of_val(&*src);
2046            let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
2047
2048            // Copy value as bytes
2049            ptr::copy_nonoverlapping(
2050                (&raw const *src) as *const u8,
2051                (&raw mut (*ptr).data) as *mut u8,
2052                value_size,
2053            );
2054
2055            // Free the allocation without dropping its contents
2056            let (bptr, alloc) = Box::into_raw_with_allocator(src);
2057            let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
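            // `ManuallyDrop<T>` is layout-compatible with `T`, so dropping this
            // box frees the allocation without running `T`'s destructor; the
            // value itself now lives in the new `ArcInner`.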
2058            drop(src);
2059
2060            Self::from_ptr_in(ptr, alloc)
2061        }
2062    }
2063}
2064
2065impl<T> Arc<[T]> {
2066    /// Allocates an `ArcInner<[T]>` with the given length.
2067    #[cfg(not(no_global_oom_handling))]
2068    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
2069        unsafe {
2070            Self::allocate_for_layout(
2071                Layout::array::<T>(len).unwrap(),
2072                |layout| Global.allocate(layout),
2073                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2074            )
2075        }
2076    }
2077
2078    /// Copy elements from slice into newly allocated `Arc<[T]>`
2079    ///
2080    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
2081    #[cfg(not(no_global_oom_handling))]
2082    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
2083        unsafe {
2084            let ptr = Self::allocate_for_slice(v.len());
2085
2086            ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());
2087
2088            Self::from_ptr(ptr)
2089        }
2090    }
2091
2092    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
2093    ///
2094    /// Behavior is undefined should the size be wrong.
2095    #[cfg(not(no_global_oom_handling))]
2096    unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
2097        // Panic guard while cloning T elements.
2098        // In the event of a panic, elements that have been written
2099        // into the new ArcInner will be dropped, then the memory freed.
2100        struct Guard<T> {
2101            mem: NonNull<u8>,
2102            elems: *mut T,
2103            layout: Layout,
2104            n_elems: usize,
2105        }
2106
2107        impl<T> Drop for Guard<T> {
2108            fn drop(&mut self) {
2109                unsafe {
2110                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
2111                    ptr::drop_in_place(slice);
2112
2113                    Global.deallocate(self.mem, self.layout);
2114                }
2115            }
2116        }
2117
2118        unsafe {
2119            let ptr = Self::allocate_for_slice(len);
2120
2121            let mem = ptr as *mut _ as *mut u8;
2122            let layout = Layout::for_value_raw(ptr);
2123
2124            // Pointer to first element
2125            let elems = (&raw mut (*ptr).data) as *mut T;
2126
2127            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2128
2129            for (i, item) in iter.enumerate() {
2130                ptr::write(elems.add(i), item);
2131                guard.n_elems += 1;
2132            }
2133
2134            // All clear. Forget the guard so it doesn't free the new ArcInner.
2135            mem::forget(guard);
2136
2137            Self::from_ptr(ptr)
2138        }
2139    }
2140}
2141
2142impl<T, A: Allocator> Arc<[T], A> {
2143    /// Allocates an `ArcInner<[T]>` with the given length.
2144    #[inline]
2145    #[cfg(not(no_global_oom_handling))]
2146    unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2147        unsafe {
2148            Arc::allocate_for_layout(
2149                Layout::array::<T>(len).unwrap(),
2150                |layout| alloc.allocate(layout),
2151                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2152            )
2153        }
2154    }
2155}
2156
2157/// Specialization trait used for `From<&[T]>`.
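///
/// For example, `Arc::<[T]>::from(&slice[..])` dispatches here: the `T: Copy`
/// impl below takes a single memcpy fast path, while the default `T: Clone`
/// impl clones element by element via `from_iter_exact`.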
2158#[cfg(not(no_global_oom_handling))]
2159trait ArcFromSlice<T> {
2160    fn from_slice(slice: &[T]) -> Self;
2161}
2162
2163#[cfg(not(no_global_oom_handling))]
2164impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2165    #[inline]
2166    default fn from_slice(v: &[T]) -> Self {
2167        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2168    }
2169}
2170
2171#[cfg(not(no_global_oom_handling))]
2172impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
2173    #[inline]
2174    fn from_slice(v: &[T]) -> Self {
2175        unsafe { Arc::copy_from_slice(v) }
2176    }
2177}
2178
2179#[stable(feature = "rust1", since = "1.0.0")]
2180impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2181    /// Makes a clone of the `Arc` pointer.
2182    ///
2183    /// This creates another pointer to the same allocation, increasing the
2184    /// strong reference count.
2185    ///
2186    /// # Examples
2187    ///
2188    /// ```
2189    /// use std::sync::Arc;
2190    ///
2191    /// let five = Arc::new(5);
2192    ///
2193    /// let _ = Arc::clone(&five);
2194    /// ```
2195    #[inline]
2196    fn clone(&self) -> Arc<T, A> {
2197        // Using a relaxed ordering is alright here, as knowledge of the
2198        // original reference prevents other threads from erroneously deleting
2199        // the object.
2200        //
        // As explained in the [Boost documentation][1], increasing the
2202        // reference counter can always be done with memory_order_relaxed: New
2203        // references to an object can only be formed from an existing
2204        // reference, and passing an existing reference from one thread to
2205        // another must already provide any required synchronization.
2206        //
2207        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2208        let old_size = self.inner().strong.fetch_add(1, Relaxed);
2209
2210        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
2211        // Arcs. If we don't do this the count can overflow and users will use-after free. This
2212        // branch will never be taken in any realistic program. We abort because such a program is
2213        // incredibly degenerate, and we don't care to support it.
2214        //
2215        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2216        // But we do that check *after* having done the increment, so there is a chance here that
2217        // the worst already happened and we actually do overflow the `usize` counter. However, that
2218        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2219        // above and the `abort` below, which seems exceedingly unlikely.
2220        //
2221        // This is a global invariant, and also applies when using a compare-exchange loop to increment
2222        // counters in other methods.
2223        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2224        // and then overflow using a few `fetch_add`s.
2225        if old_size > MAX_REFCOUNT {
2226            abort();
2227        }
2228
2229        unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2230    }
2231}
2232
2233#[unstable(feature = "ergonomic_clones", issue = "132290")]
2234impl<T: ?Sized, A: Allocator + Clone> UseCloned for Arc<T, A> {}
2235
2236#[stable(feature = "rust1", since = "1.0.0")]
2237impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2238    type Target = T;
2239
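    /// Dereferences the `Arc`, yielding a shared reference to the inner value.
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// assert_eq!(*five, 5);
    /// ```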
2240    #[inline]
2241    fn deref(&self) -> &T {
2242        &self.inner().data
2243    }
2244}
2245
2246#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2247unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Arc<T, A> {}
2248
2249#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2250unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Weak<T, A> {}
2251
2252#[unstable(feature = "deref_pure_trait", issue = "87121")]
2253unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2254
2255#[unstable(feature = "legacy_receiver_trait", issue = "none")]
2256impl<T: ?Sized> LegacyReceiver for Arc<T> {}
2257
2258#[cfg(not(no_global_oom_handling))]
2259impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
2260    /// Makes a mutable reference into the given `Arc`.
2261    ///
2262    /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2263    /// [`clone`] the inner value to a new allocation to ensure unique ownership.  This is also
2264    /// referred to as clone-on-write.
2265    ///
2266    /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2267    /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2268    /// be cloned.
2269    ///
2270    /// See also [`get_mut`], which will fail rather than cloning the inner value
2271    /// or dissociating [`Weak`] pointers.
2272    ///
2273    /// [`clone`]: Clone::clone
2274    /// [`get_mut`]: Arc::get_mut
2275    ///
2276    /// # Examples
2277    ///
2278    /// ```
2279    /// use std::sync::Arc;
2280    ///
2281    /// let mut data = Arc::new(5);
2282    ///
2283    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2284    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2285    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
2286    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2287    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
2288    ///
2289    /// // Now `data` and `other_data` point to different allocations.
2290    /// assert_eq!(*data, 8);
2291    /// assert_eq!(*other_data, 12);
2292    /// ```
2293    ///
2294    /// [`Weak`] pointers will be dissociated:
2295    ///
2296    /// ```
2297    /// use std::sync::Arc;
2298    ///
2299    /// let mut data = Arc::new(75);
2300    /// let weak = Arc::downgrade(&data);
2301    ///
2302    /// assert!(75 == *data);
2303    /// assert!(75 == *weak.upgrade().unwrap());
2304    ///
2305    /// *Arc::make_mut(&mut data) += 1;
2306    ///
2307    /// assert!(76 == *data);
2308    /// assert!(weak.upgrade().is_none());
2309    /// ```
2310    #[inline]
2311    #[stable(feature = "arc_unique", since = "1.4.0")]
2312    pub fn make_mut(this: &mut Self) -> &mut T {
2313        let size_of_val = size_of_val::<T>(&**this);
2314
2315        // Note that we hold both a strong reference and a weak reference.
2316        // Thus, releasing our strong reference only will not, by itself, cause
2317        // the memory to be deallocated.
2318        //
2319        // Use Acquire to ensure that we see any writes to `weak` that happen
2320        // before release writes (i.e., decrements) to `strong`. Since we hold a
2321        // weak count, there's no chance the ArcInner itself could be
2322        // deallocated.
2323        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2324            // Another strong pointer exists, so we must clone.
2325
2326            let this_data_ref: &T = &**this;
2327            // `in_progress` drops the allocation if we panic before finishing initializing it.
2328            let mut in_progress: UniqueArcUninit<T, A> =
2329                UniqueArcUninit::new(this_data_ref, this.alloc.clone());
2330
2331            let initialized_clone = unsafe {
2332                // Clone. If the clone panics, `in_progress` will be dropped and clean up.
2333                this_data_ref.clone_to_uninit(in_progress.data_ptr().cast());
2334                // Cast type of pointer, now that it is initialized.
2335                in_progress.into_arc()
2336            };
2337            *this = initialized_clone;
2338        } else if this.inner().weak.load(Relaxed) != 1 {
2339            // Relaxed suffices in the above because this is fundamentally an
2340            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2342
2343            // We removed the last strong ref, but there are additional weak
2344            // refs remaining. We'll move the contents to a new Arc, and
2345            // invalidate the other weak refs.
2346
2347            // Note that it is not possible for the read of `weak` to yield
2348            // usize::MAX (i.e., locked), since the weak count can only be
2349            // locked by a thread with a strong reference.
2350
2351            // Materialize our own implicit weak pointer, so that it can clean
2352            // up the ArcInner as needed.
2353            let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2354
            // We can just steal the data; all that's left is Weaks.
2356            //
2357            // We don't need panic-protection like the above branch does, but we might as well
2358            // use the same mechanism.
2359            let mut in_progress: UniqueArcUninit<T, A> =
2360                UniqueArcUninit::new(&**this, this.alloc.clone());
2361            unsafe {
2362                // Initialize `in_progress` with move of **this.
2363                // We have to express this in terms of bytes because `T: ?Sized`; there is no
2364                // operation that just copies a value based on its `size_of_val()`.
2365                ptr::copy_nonoverlapping(
2366                    ptr::from_ref(&**this).cast::<u8>(),
2367                    in_progress.data_ptr().cast::<u8>(),
2368                    size_of_val,
2369                );
2370
2371                ptr::write(this, in_progress.into_arc());
2372            }
2373        } else {
2374            // We were the sole reference of either kind; bump back up the
2375            // strong ref count.
2376            this.inner().strong.store(1, Release);
2377        }
2378
2379        // As with `get_mut()`, the unsafety is ok because our reference was
2380        // either unique to begin with, or became one upon cloning the contents.
2381        unsafe { Self::get_mut_unchecked(this) }
2382    }
2383}
2384
2385impl<T: Clone, A: Allocator> Arc<T, A> {
2386    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2387    /// clone.
2388    ///
2389    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2390    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2391    ///
2392    /// # Examples
2393    ///
2394    /// ```
2395    /// # use std::{ptr, sync::Arc};
2396    /// let inner = String::from("test");
2397    /// let ptr = inner.as_ptr();
2398    ///
2399    /// let arc = Arc::new(inner);
2400    /// let inner = Arc::unwrap_or_clone(arc);
2401    /// // The inner value was not cloned
2402    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2403    ///
2404    /// let arc = Arc::new(inner);
2405    /// let arc2 = arc.clone();
2406    /// let inner = Arc::unwrap_or_clone(arc);
2407    /// // Because there were 2 references, we had to clone the inner value.
2408    /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2409    /// // `arc2` is the last reference, so when we unwrap it we get back
2410    /// // the original `String`.
2411    /// let inner = Arc::unwrap_or_clone(arc2);
2412    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2413    /// ```
2414    #[inline]
2415    #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2416    pub fn unwrap_or_clone(this: Self) -> T {
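        // `try_unwrap` succeeds only if this is the sole strong reference;
        // otherwise, fall back to cloning the inner value out of the shared
        // allocation.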
2417        Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2418    }
2419}
2420
2421impl<T: ?Sized, A: Allocator> Arc<T, A> {
2422    /// Returns a mutable reference into the given `Arc`, if there are
2423    /// no other `Arc` or [`Weak`] pointers to the same allocation.
2424    ///
2425    /// Returns [`None`] otherwise, because it is not safe to
2426    /// mutate a shared value.
2427    ///
2428    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2429    /// the inner value when there are other `Arc` pointers.
2430    ///
2431    /// [make_mut]: Arc::make_mut
2432    /// [clone]: Clone::clone
2433    ///
2434    /// # Examples
2435    ///
2436    /// ```
2437    /// use std::sync::Arc;
2438    ///
2439    /// let mut x = Arc::new(3);
2440    /// *Arc::get_mut(&mut x).unwrap() = 4;
2441    /// assert_eq!(*x, 4);
2442    ///
2443    /// let _y = Arc::clone(&x);
2444    /// assert!(Arc::get_mut(&mut x).is_none());
2445    /// ```
2446    #[inline]
2447    #[stable(feature = "arc_unique", since = "1.4.0")]
2448    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2449        if Self::is_unique(this) {
2450            // This unsafety is ok because we're guaranteed that the pointer
2451            // returned is the *only* pointer that will ever be returned to T. Our
2452            // reference count is guaranteed to be 1 at this point, and we required
2453            // the Arc itself to be `mut`, so we're returning the only possible
2454            // reference to the inner data.
2455            unsafe { Some(Arc::get_mut_unchecked(this)) }
2456        } else {
2457            None
2458        }
2459    }
2460
2461    /// Returns a mutable reference into the given `Arc`,
2462    /// without any check.
2463    ///
2464    /// See also [`get_mut`], which is safe and does appropriate checks.
2465    ///
2466    /// [`get_mut`]: Arc::get_mut
2467    ///
2468    /// # Safety
2469    ///
2470    /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2471    /// they must not be dereferenced or have active borrows for the duration
2472    /// of the returned borrow, and their inner type must be exactly the same as the
2473    /// inner type of this Rc (including lifetimes). This is trivially the case if no
2474    /// such pointers exist, for example immediately after `Arc::new`.
2475    ///
2476    /// # Examples
2477    ///
2478    /// ```
2479    /// #![feature(get_mut_unchecked)]
2480    ///
2481    /// use std::sync::Arc;
2482    ///
2483    /// let mut x = Arc::new(String::new());
2484    /// unsafe {
2485    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
2486    /// }
2487    /// assert_eq!(*x, "foo");
2488    /// ```
2489    /// Other `Arc` pointers to the same allocation must be to the same type.
2490    /// ```no_run
2491    /// #![feature(get_mut_unchecked)]
2492    ///
2493    /// use std::sync::Arc;
2494    ///
2495    /// let x: Arc<str> = Arc::from("Hello, world!");
2496    /// let mut y: Arc<[u8]> = x.clone().into();
2497    /// unsafe {
2498    ///     // this is Undefined Behavior, because x's inner type is str, not [u8]
2499    ///     Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2500    /// }
2501    /// println!("{}", &*x); // Invalid UTF-8 in a str
2502    /// ```
2503    /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2504    /// ```no_run
2505    /// #![feature(get_mut_unchecked)]
2506    ///
2507    /// use std::sync::Arc;
2508    ///
2509    /// let x: Arc<&str> = Arc::new("Hello, world!");
2510    /// {
2511    ///     let s = String::from("Oh, no!");
2512    ///     let mut y: Arc<&str> = x.clone();
2513    ///     unsafe {
2514    ///         // this is Undefined Behavior, because x's inner type
2515    ///         // is &'long str, not &'short str
2516    ///         *Arc::get_mut_unchecked(&mut y) = &s;
2517    ///     }
2518    /// }
2519    /// println!("{}", &*x); // Use-after-free
2520    /// ```
2521    #[inline]
2522    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2523    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2524        // We are careful to *not* create a reference covering the "count" fields, as
2525        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2526        unsafe { &mut (*this.ptr.as_ptr()).data }
2527    }
2528
    /// Determines whether this is the unique reference to the underlying data.
2530    ///
2531    /// Returns `true` if there are no other `Arc` or [`Weak`] pointers to the same allocation;
2532    /// returns `false` otherwise.
2533    ///
    /// If this function returns `true`, then it is guaranteed to be safe to call [`get_mut_unchecked`]
2535    /// on this `Arc`, so long as no clones occur in between.
2536    ///
2537    /// # Examples
2538    ///
2539    /// ```
2540    /// #![feature(arc_is_unique)]
2541    ///
2542    /// use std::sync::Arc;
2543    ///
2544    /// let x = Arc::new(3);
2545    /// assert!(Arc::is_unique(&x));
2546    ///
2547    /// let y = Arc::clone(&x);
2548    /// assert!(!Arc::is_unique(&x));
2549    /// drop(y);
2550    ///
2551    /// // Weak references also count, because they could be upgraded at any time.
2552    /// let z = Arc::downgrade(&x);
2553    /// assert!(!Arc::is_unique(&x));
2554    /// ```
2555    ///
2556    /// # Pointer invalidation
2557    ///
2558    /// This function will always return the same value as `Arc::get_mut(arc).is_some()`. However,
2559    /// unlike that operation it does not produce any mutable references to the underlying data,
2560    /// meaning no pointers to the data inside the `Arc` are invalidated by the call. Thus, the
2561    /// following code is valid, even though it would be UB if it used `Arc::get_mut`:
2562    ///
2563    /// ```
2564    /// #![feature(arc_is_unique)]
2565    ///
2566    /// use std::sync::Arc;
2567    ///
2568    /// let arc = Arc::new(5);
2569    /// let pointer: *const i32 = &*arc;
2570    /// assert!(Arc::is_unique(&arc));
2571    /// assert_eq!(unsafe { *pointer }, 5);
2572    /// ```
2573    ///
2574    /// # Atomic orderings
2575    ///
    /// Concurrent drops of other `Arc` pointers to the same allocation will synchronize with this
2577    /// call - that is, this call performs an `Acquire` operation on the underlying strong and weak
2578    /// ref counts. This ensures that calling `get_mut_unchecked` is safe.
2579    ///
2580    /// Note that this operation requires locking the weak ref count, so concurrent calls to
2581    /// `downgrade` may spin-loop for a short period of time.
2582    ///
2583    /// [`get_mut_unchecked`]: Self::get_mut_unchecked
2584    #[inline]
2585    #[unstable(feature = "arc_is_unique", issue = "138938")]
2586    pub fn is_unique(this: &Self) -> bool {
2587        // lock the weak pointer count if we appear to be the sole weak pointer
2588        // holder.
2589        //
2590        // The acquire label here ensures a happens-before relationship with any
2591        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2592        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2593        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2594        if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2595            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2596            // counter in `drop` -- the only access that happens when any but the last reference
2597            // is being dropped.
2598            let unique = this.inner().strong.load(Acquire) == 1;
2599
2600            // The release write here synchronizes with a read in `downgrade`,
2601            // effectively preventing the above read of `strong` from happening
2602            // after the write.
2603            this.inner().weak.store(1, Release); // release the lock
2604            unique
2605        } else {
2606            false
2607        }
2608    }
2609}
2610
2611#[stable(feature = "rust1", since = "1.0.0")]
2612unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2613    /// Drops the `Arc`.
2614    ///
2615    /// This will decrement the strong reference count. If the strong reference
2616    /// count reaches zero then the only other references (if any) are
2617    /// [`Weak`], so we `drop` the inner value.
2618    ///
2619    /// # Examples
2620    ///
2621    /// ```
2622    /// use std::sync::Arc;
2623    ///
2624    /// struct Foo;
2625    ///
2626    /// impl Drop for Foo {
2627    ///     fn drop(&mut self) {
2628    ///         println!("dropped!");
2629    ///     }
2630    /// }
2631    ///
2632    /// let foo  = Arc::new(Foo);
2633    /// let foo2 = Arc::clone(&foo);
2634    ///
2635    /// drop(foo);    // Doesn't print anything
2636    /// drop(foo2);   // Prints "dropped!"
2637    /// ```
2638    #[inline]
2639    fn drop(&mut self) {
2640        // Because `fetch_sub` is already atomic, we do not need to synchronize
2641        // with other threads unless we are going to delete the object. This
2642        // same logic applies to the below `fetch_sub` to the `weak` count.
2643        if self.inner().strong.fetch_sub(1, Release) != 1 {
2644            return;
2645        }
2646
2647        // This fence is needed to prevent reordering of use of the data and
2648        // deletion of the data. Because it is marked `Release`, the decreasing
2649        // of the reference count synchronizes with this `Acquire` fence. This
2650        // means that use of the data happens before decreasing the reference
2651        // count, which happens before this fence, which happens before the
2652        // deletion of the data.
2653        //
2654        // As explained in the [Boost documentation][1],
2655        //
2656        // > It is important to enforce any possible access to the object in one
2657        // > thread (through an existing reference) to *happen before* deleting
2658        // > the object in a different thread. This is achieved by a "release"
2659        // > operation after dropping a reference (any access to the object
2660        // > through this reference must obviously happened before), and an
2661        // > "acquire" operation before deleting the object.
2662        //
2663        // In particular, while the contents of an Arc are usually immutable, it's
2664        // possible to have interior writes to something like a Mutex<T>. Since a
2665        // Mutex is not acquired when it is deleted, we can't rely on its
2666        // synchronization logic to make writes in thread A visible to a destructor
2667        // running in thread B.
2668        //
2669        // Also note that the Acquire fence here could probably be replaced with an
2670        // Acquire load, which could improve performance in highly-contended
2671        // situations. See [2].
2672        //
2673        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2674        // [2]: (https://github.com/rust-lang/rust/pull/41714)
2675        acquire!(self.inner().strong);
2676
2677        // Make sure we aren't trying to "drop" the shared static for empty slices
2678        // used by Default::default.
2679        debug_assert!(
2680            !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
2681            "Arcs backed by a static should never reach a strong count of 0. \
2682            Likely decrement_strong_count or from_raw were called too many times.",
2683        );
2684
2685        unsafe {
2686            self.drop_slow();
2687        }
2688    }
2689}
2690
2691impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2692    /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2693    ///
2694    /// # Examples
2695    ///
2696    /// ```
2697    /// use std::any::Any;
2698    /// use std::sync::Arc;
2699    ///
2700    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2701    ///     if let Ok(string) = value.downcast::<String>() {
2702    ///         println!("String ({}): {}", string.len(), string);
2703    ///     }
2704    /// }
2705    ///
2706    /// let my_string = "Hello World".to_string();
2707    /// print_if_string(Arc::new(my_string));
2708    /// print_if_string(Arc::new(0i8));
2709    /// ```
2710    #[inline]
2711    #[stable(feature = "rc_downcast", since = "1.29.0")]
2712    pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2713    where
2714        T: Any + Send + Sync,
2715    {
2716        if (*self).is::<T>() {
2717            unsafe {
2718                let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2719                Ok(Arc::from_inner_in(ptr.cast(), alloc))
2720            }
2721        } else {
2722            Err(self)
2723        }
2724    }
2725
2726    /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2727    ///
2728    /// For a safe alternative see [`downcast`].
2729    ///
2730    /// # Examples
2731    ///
2732    /// ```
2733    /// #![feature(downcast_unchecked)]
2734    ///
2735    /// use std::any::Any;
2736    /// use std::sync::Arc;
2737    ///
2738    /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2739    ///
2740    /// unsafe {
2741    ///     assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2742    /// }
2743    /// ```
2744    ///
2745    /// # Safety
2746    ///
2747    /// The contained value must be of type `T`. Calling this method
2748    /// with the incorrect type is *undefined behavior*.
2749    ///
    ///
2752    #[inline]
2753    #[unstable(feature = "downcast_unchecked", issue = "90850")]
2754    pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2755    where
2756        T: Any + Send + Sync,
2757    {
2758        unsafe {
2759            let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2760            Arc::from_inner_in(ptr.cast(), alloc)
2761        }
2762    }
2763}
2764
2765impl<T> Weak<T> {
2766    /// Constructs a new `Weak<T>`, without allocating any memory.
2767    /// Calling [`upgrade`] on the return value always gives [`None`].
2768    ///
2769    /// [`upgrade`]: Weak::upgrade
2770    ///
2771    /// # Examples
2772    ///
2773    /// ```
2774    /// use std::sync::Weak;
2775    ///
2776    /// let empty: Weak<i64> = Weak::new();
2777    /// assert!(empty.upgrade().is_none());
2778    /// ```
2779    #[inline]
2780    #[stable(feature = "downgraded_weak", since = "1.10.0")]
2781    #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2782    #[must_use]
2783    pub const fn new() -> Weak<T> {
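        // A sentinel address with no provenance: `is_dangling` recognizes it,
        // so `upgrade` will return `None` without ever touching memory.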
2784        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc: Global }
2785    }
2786}
2787
2788impl<T, A: Allocator> Weak<T, A> {
    /// Constructs a new `Weak<T, A>` associated with the provided allocator, without
    /// allocating any memory.
2791    /// Calling [`upgrade`] on the return value always gives [`None`].
2792    ///
2793    /// [`upgrade`]: Weak::upgrade
2794    ///
2795    /// # Examples
2796    ///
2797    /// ```
2798    /// #![feature(allocator_api)]
2799    ///
2800    /// use std::sync::Weak;
2801    /// use std::alloc::System;
2802    ///
2803    /// let empty: Weak<i64, _> = Weak::new_in(System);
2804    /// assert!(empty.upgrade().is_none());
2805    /// ```
2806    #[inline]
2807    #[unstable(feature = "allocator_api", issue = "32838")]
2808    pub fn new_in(alloc: A) -> Weak<T, A> {
2809        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc }
2810    }
2811}
2812
2813/// Helper type to allow accessing the reference counts without
2814/// making any assertions about the data field.
2815struct WeakInner<'a> {
2816    weak: &'a Atomic<usize>,
2817    strong: &'a Atomic<usize>,
2818}
2819
2820impl<T: ?Sized> Weak<T> {
2821    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2822    ///
2823    /// This can be used to safely get a strong reference (by calling [`upgrade`]
2824    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2825    ///
2826    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2827    /// as these don't own anything; the method still works on them).
2828    ///
2829    /// # Safety
2830    ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
    /// weak reference, and must point to a block of memory allocated by the global allocator.
2833    ///
2834    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2835    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2836    /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
2839    ///
2840    /// ```
2841    /// use std::sync::{Arc, Weak};
2842    ///
2843    /// let strong = Arc::new("hello".to_owned());
2844    ///
2845    /// let raw_1 = Arc::downgrade(&strong).into_raw();
2846    /// let raw_2 = Arc::downgrade(&strong).into_raw();
2847    ///
2848    /// assert_eq!(2, Arc::weak_count(&strong));
2849    ///
2850    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2851    /// assert_eq!(1, Arc::weak_count(&strong));
2852    ///
2853    /// drop(strong);
2854    ///
2855    /// // Decrement the last weak count.
2856    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2857    /// ```
2858    ///
2859    /// [`new`]: Weak::new
2860    /// [`into_raw`]: Weak::into_raw
2861    /// [`upgrade`]: Weak::upgrade
2862    #[inline]
2863    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2864    pub unsafe fn from_raw(ptr: *const T) -> Self {
2865        unsafe { Weak::from_raw_in(ptr, Global) }
2866    }
2867}
2868
2869impl<T: ?Sized, A: Allocator> Weak<T, A> {
2870    /// Returns a reference to the underlying allocator.
2871    #[inline]
2872    #[unstable(feature = "allocator_api", issue = "32838")]
2873    pub fn allocator(&self) -> &A {
2874        &self.alloc
2875    }
2876
2877    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2878    ///
2879    /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2880    /// unaligned or even [`null`] otherwise.
2881    ///
2882    /// # Examples
2883    ///
2884    /// ```
2885    /// use std::sync::Arc;
2886    /// use std::ptr;
2887    ///
2888    /// let strong = Arc::new("hello".to_owned());
2889    /// let weak = Arc::downgrade(&strong);
2890    /// // Both point to the same object
2891    /// assert!(ptr::eq(&*strong, weak.as_ptr()));
2892    /// // The strong here keeps it alive, so we can still access the object.
2893    /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
2894    ///
2895    /// drop(strong);
2896    /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
2897    /// // undefined behavior.
2898    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
2899    /// ```
2900    ///
2901    /// [`null`]: core::ptr::null "ptr::null"
2902    #[must_use]
2903    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2904    pub fn as_ptr(&self) -> *const T {
2905        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
2906
2907        if is_dangling(ptr) {
2908            // If the pointer is dangling, we return the sentinel directly. This cannot be
2909            // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
2910            ptr as *const T
2911        } else {
2912            // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
2913            // The payload may be dropped at this point, and we have to maintain provenance,
2914            // so use raw pointer manipulation.
2915            unsafe { &raw mut (*ptr).data }
2916        }
2917    }
2918
2919    /// Consumes the `Weak<T>` and turns it into a raw pointer.
2920    ///
2921    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2922    /// one weak reference (the weak count is not modified by this operation). It can be turned
2923    /// back into the `Weak<T>` with [`from_raw`].
2924    ///
2925    /// The same restrictions of accessing the target of the pointer as with
2926    /// [`as_ptr`] apply.
2927    ///
2928    /// # Examples
2929    ///
2930    /// ```
2931    /// use std::sync::{Arc, Weak};
2932    ///
2933    /// let strong = Arc::new("hello".to_owned());
2934    /// let weak = Arc::downgrade(&strong);
2935    /// let raw = weak.into_raw();
2936    ///
2937    /// assert_eq!(1, Arc::weak_count(&strong));
2938    /// assert_eq!("hello", unsafe { &*raw });
2939    ///
2940    /// drop(unsafe { Weak::from_raw(raw) });
2941    /// assert_eq!(0, Arc::weak_count(&strong));
2942    /// ```
2943    ///
2944    /// [`from_raw`]: Weak::from_raw
2945    /// [`as_ptr`]: Weak::as_ptr
2946    #[must_use = "losing the pointer will leak memory"]
2947    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2948    pub fn into_raw(self) -> *const T {
2949        ManuallyDrop::new(self).as_ptr()
2950    }
2951
2952    /// Consumes the `Weak<T>`, returning the wrapped pointer and allocator.
2953    ///
2954    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2955    /// one weak reference (the weak count is not modified by this operation). It can be turned
2956    /// back into the `Weak<T>` with [`from_raw_in`].
2957    ///
2958    /// The same restrictions of accessing the target of the pointer as with
2959    /// [`as_ptr`] apply.
2960    ///
2961    /// # Examples
2962    ///
2963    /// ```
2964    /// #![feature(allocator_api)]
2965    /// use std::sync::{Arc, Weak};
2966    /// use std::alloc::System;
2967    ///
2968    /// let strong = Arc::new_in("hello".to_owned(), System);
2969    /// let weak = Arc::downgrade(&strong);
2970    /// let (raw, alloc) = weak.into_raw_with_allocator();
2971    ///
2972    /// assert_eq!(1, Arc::weak_count(&strong));
2973    /// assert_eq!("hello", unsafe { &*raw });
2974    ///
2975    /// drop(unsafe { Weak::from_raw_in(raw, alloc) });
2976    /// assert_eq!(0, Arc::weak_count(&strong));
2977    /// ```
2978    ///
2979    /// [`from_raw_in`]: Weak::from_raw_in
2980    /// [`as_ptr`]: Weak::as_ptr
2981    #[must_use = "losing the pointer will leak memory"]
2982    #[unstable(feature = "allocator_api", issue = "32838")]
2983    pub fn into_raw_with_allocator(self) -> (*const T, A) {
2984        let this = mem::ManuallyDrop::new(self);
2985        let result = this.as_ptr();
2986        // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
2987        let alloc = unsafe { ptr::read(&this.alloc) };
2988        (result, alloc)
2989    }
2990
2991    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
2992    /// allocator.
2993    ///
2994    /// This can be used to safely get a strong reference (by calling [`upgrade`]
2995    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2996    ///
2997    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2998    /// as these don't own anything; the method still works on them).
2999    ///
3000    /// # Safety
3001    ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
    /// weak reference, and must point to a block of memory allocated by `alloc`.
3004    ///
3005    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
3006    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
3007    /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
3010    ///
3011    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::{Arc, Weak};
    /// use std::alloc::System;
    ///
    /// let strong = Arc::new_in("hello".to_owned(), System);
    ///
    /// let raw_1 = Arc::downgrade(&strong).into_raw();
    /// let raw_2 = Arc::downgrade(&strong).into_raw();
    ///
    /// assert_eq!(2, Arc::weak_count(&strong));
    ///
    /// assert_eq!("hello", &*unsafe { Weak::from_raw_in(raw_1, System) }.upgrade().unwrap());
    /// assert_eq!(1, Arc::weak_count(&strong));
    ///
    /// drop(strong);
    ///
    /// // Decrement the last weak count.
    /// assert!(unsafe { Weak::from_raw_in(raw_2, System) }.upgrade().is_none());
3028    /// ```
3029    ///
3030    /// [`new`]: Weak::new
3031    /// [`into_raw`]: Weak::into_raw
3032    /// [`upgrade`]: Weak::upgrade
3033    #[inline]
3034    #[unstable(feature = "allocator_api", issue = "32838")]
3035    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
3036        // See Weak::as_ptr for context on how the input pointer is derived.
3037
3038        let ptr = if is_dangling(ptr) {
3039            // This is a dangling Weak.
3040            ptr as *mut ArcInner<T>
3041        } else {
3042            // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
3043            // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
3044            let offset = unsafe { data_offset(ptr) };
            // Thus, we reverse the offset to get the whole ArcInner.
3046            // SAFETY: the pointer originated from a Weak, so this offset is safe.
3047            unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
3048        };
3049
3050        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
3051        Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
3052    }
3053}
3054
3055impl<T: ?Sized, A: Allocator> Weak<T, A> {
3056    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
3057    /// dropping of the inner value if successful.
3058    ///
3059    /// Returns [`None`] if the inner value has since been dropped.
3060    ///
3061    /// # Examples
3062    ///
3063    /// ```
3064    /// use std::sync::Arc;
3065    ///
3066    /// let five = Arc::new(5);
3067    ///
3068    /// let weak_five = Arc::downgrade(&five);
3069    ///
3070    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
3071    /// assert!(strong_five.is_some());
3072    ///
3073    /// // Destroy all strong pointers.
3074    /// drop(strong_five);
3075    /// drop(five);
3076    ///
3077    /// assert!(weak_five.upgrade().is_none());
3078    /// ```
3079    #[must_use = "this returns a new `Arc`, \
3080                  without modifying the original weak pointer"]
3081    #[stable(feature = "arc_weak", since = "1.4.0")]
3082    pub fn upgrade(&self) -> Option<Arc<T, A>>
3083    where
3084        A: Clone,
3085    {
3086        #[inline]
3087        fn checked_increment(n: usize) -> Option<usize> {
            // Any write of 0 we can observe leaves the field in a permanently zero state.
3089            if n == 0 {
3090                return None;
3091            }
3092            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
3093            assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
3094            Some(n + 1)
3095        }
3096
3097        // We use a CAS loop to increment the strong count instead of a
3098        // fetch_add as this function should never take the reference count
3099        // from zero to one.
3100        //
3101        // Relaxed is fine for the failure case because we don't have any expectations about the new state.
3102        // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
3103        // value can be initialized after `Weak` references have already been created. In that case, we
3104        // expect to observe the fully initialized value.
3105        if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
            // SAFETY: `checked_increment` only succeeds when the strong count is nonzero, so the
            // allocation is still live and we now own one strong reference to it.
3107            unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
3108        } else {
3109            None
3110        }
3111    }
3112
3113    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
3114    ///
3115    /// If `self` was created using [`Weak::new`], this will return 0.
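    ///
    /// # Examples
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// drop(five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```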
3116    #[must_use]
3117    #[stable(feature = "weak_counts", since = "1.41.0")]
3118    pub fn strong_count(&self) -> usize {
3119        if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
3120    }
3121
3122    /// Gets an approximation of the number of `Weak` pointers pointing to this
3123    /// allocation.
3124    ///
3125    /// If `self` was created using [`Weak::new`], or if there are no remaining
3126    /// strong pointers, this will return 0.
3127    ///
3128    /// # Accuracy
3129    ///
3130    /// Due to implementation details, the returned value can be off by 1 in
3131    /// either direction when other threads are manipulating any `Arc`s or
3132    /// `Weak`s pointing to the same allocation.
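    ///
    /// # Examples
    ///
    /// A single-threaded sketch (with no other threads manipulating the counts,
    /// the returned value is exact):
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// let _also_five = Weak::clone(&weak_five);
    /// assert_eq!(2, weak_five.weak_count());
    ///
    /// drop(five);
    /// // No strong pointers remain, so the reported count is 0.
    /// assert_eq!(0, weak_five.weak_count());
    /// ```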
3133    #[must_use]
3134    #[stable(feature = "weak_counts", since = "1.41.0")]
3135    pub fn weak_count(&self) -> usize {
3136        if let Some(inner) = self.inner() {
3137            let weak = inner.weak.load(Acquire);
3138            let strong = inner.strong.load(Relaxed);
3139            if strong == 0 {
3140                0
3141            } else {
3142                // Since we observed that there was at least one strong pointer
3143                // after reading the weak count, we know that the implicit weak
3144                // reference (present whenever any strong references are alive)
3145                // was still around when we observed the weak count, and can
3146                // therefore safely subtract it.
3147                weak - 1
3148            }
3149        } else {
3150            0
3151        }
3152    }
3153
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
3156    #[inline]
3157    fn inner(&self) -> Option<WeakInner<'_>> {
3158        let ptr = self.ptr.as_ptr();
3159        if is_dangling(ptr) {
3160            None
3161        } else {
3162            // We are careful to *not* create a reference covering the "data" field, as
3163            // the field may be mutated concurrently (for example, if the last `Arc`
3164            // is dropped, the data field will be dropped in-place).
3165            Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
3166        }
3167    }
3168
    /// Returns `true` if the two `Weak`s point to the same allocation (similar to [`ptr::eq`]), or
    /// if both don't point to any allocation (because they were created with `Weak::new()`).
    /// However, this function ignores the metadata of `dyn Trait` pointers.
    ///
    /// # Notes
    ///
    /// Since this compares pointers it means that two `Weak`s created by `Weak::new()` will
    /// compare equal to each other, even though they don't point to any allocation.
3177    ///
3178    /// # Examples
3179    ///
3180    /// ```
3181    /// use std::sync::Arc;
3182    ///
3183    /// let first_rc = Arc::new(5);
3184    /// let first = Arc::downgrade(&first_rc);
3185    /// let second = Arc::downgrade(&first_rc);
3186    ///
3187    /// assert!(first.ptr_eq(&second));
3188    ///
3189    /// let third_rc = Arc::new(5);
3190    /// let third = Arc::downgrade(&third_rc);
3191    ///
3192    /// assert!(!first.ptr_eq(&third));
3193    /// ```
3194    ///
3195    /// Comparing `Weak::new`.
3196    ///
3197    /// ```
3198    /// use std::sync::{Arc, Weak};
3199    ///
3200    /// let first = Weak::new();
3201    /// let second = Weak::new();
3202    /// assert!(first.ptr_eq(&second));
3203    ///
3204    /// let third_rc = Arc::new(());
3205    /// let third = Arc::downgrade(&third_rc);
3206    /// assert!(!first.ptr_eq(&third));
3207    /// ```
3208    ///
3209    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
3210    #[inline]
3211    #[must_use]
3212    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
3213    pub fn ptr_eq(&self, other: &Self) -> bool {
3214        ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
3215    }
3216}
3217
3218#[stable(feature = "arc_weak", since = "1.4.0")]
3219impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
3220    /// Makes a clone of the `Weak` pointer that points to the same allocation.
3221    ///
3222    /// # Examples
3223    ///
3224    /// ```
3225    /// use std::sync::{Arc, Weak};
3226    ///
3227    /// let weak_five = Arc::downgrade(&Arc::new(5));
3228    ///
3229    /// let _ = Weak::clone(&weak_five);
3230    /// ```
3231    #[inline]
3232    fn clone(&self) -> Weak<T, A> {
3233        if let Some(inner) = self.inner() {
            // See comments in Arc::clone() for why this is relaxed. This can use a
            // fetch_add (ignoring the lock) because the weak count is only locked
            // when there are *no other* weak pointers in existence. (So we can't
            // be running this code in that case.)
3238            let old_size = inner.weak.fetch_add(1, Relaxed);
3239
3240            // See comments in Arc::clone() for why we do this (for mem::forget).
3241            if old_size > MAX_REFCOUNT {
3242                abort();
3243            }
3244        }
3245
3246        Weak { ptr: self.ptr, alloc: self.alloc.clone() }
3247    }
3248}
3249
3250#[unstable(feature = "ergonomic_clones", issue = "132290")]
3251impl<T: ?Sized, A: Allocator + Clone> UseCloned for Weak<T, A> {}
3252
3253#[stable(feature = "downgraded_weak", since = "1.10.0")]
3254impl<T> Default for Weak<T> {
3255    /// Constructs a new `Weak<T>`, without allocating memory.
3256    /// Calling [`upgrade`] on the return value always
3257    /// gives [`None`].
3258    ///
3259    /// [`upgrade`]: Weak::upgrade
3260    ///
3261    /// # Examples
3262    ///
3263    /// ```
3264    /// use std::sync::Weak;
3265    ///
3266    /// let empty: Weak<i64> = Default::default();
3267    /// assert!(empty.upgrade().is_none());
3268    /// ```
3269    fn default() -> Weak<T> {
3270        Weak::new()
3271    }
3272}
3273
3274#[stable(feature = "arc_weak", since = "1.4.0")]
3275unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
3276    /// Drops the `Weak` pointer.
3277    ///
3278    /// # Examples
3279    ///
3280    /// ```
3281    /// use std::sync::{Arc, Weak};
3282    ///
3283    /// struct Foo;
3284    ///
3285    /// impl Drop for Foo {
3286    ///     fn drop(&mut self) {
3287    ///         println!("dropped!");
3288    ///     }
3289    /// }
3290    ///
3291    /// let foo = Arc::new(Foo);
3292    /// let weak_foo = Arc::downgrade(&foo);
3293    /// let other_weak_foo = Weak::clone(&weak_foo);
3294    ///
3295    /// drop(weak_foo);   // Doesn't print anything
3296    /// drop(foo);        // Prints "dropped!"
3297    ///
3298    /// assert!(other_weak_foo.upgrade().is_none());
3299    /// ```
3300    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
3302        // deallocate the data entirely. See the discussion in Arc::drop() about
3303        // the memory orderings
3304        //
3305        // It's not necessary to check for the locked state here, because the
3306        // weak count can only be locked if there was precisely one weak ref,
3307        // meaning that drop could only subsequently run ON that remaining weak
3308        // ref, which can only happen after the lock is released.
3309        let inner = if let Some(inner) = self.inner() { inner } else { return };
3310
3311        if inner.weak.fetch_sub(1, Release) == 1 {
3312            acquire!(inner.weak);
3313
3314            // Make sure we aren't trying to "deallocate" the shared static for empty slices
3315            // used by Default::default.
3316            debug_assert!(
3317                !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
3318                "Arc/Weaks backed by a static should never be deallocated. \
3319                Likely decrement_strong_count or from_raw were called too many times.",
3320            );
3321
3322            unsafe {
3323                self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
3324            }
3325        }
3326    }
3327}
3328
3329#[stable(feature = "rust1", since = "1.0.0")]
3330trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3331    fn eq(&self, other: &Arc<T, A>) -> bool;
3332    fn ne(&self, other: &Arc<T, A>) -> bool;
3333}
3334
3335#[stable(feature = "rust1", since = "1.0.0")]
3336impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3337    #[inline]
3338    default fn eq(&self, other: &Arc<T, A>) -> bool {
3339        **self == **other
3340    }
3341    #[inline]
3342    default fn ne(&self, other: &Arc<T, A>) -> bool {
3343        **self != **other
3344    }
3345}
3346
/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone and also expensive to check for equality, so this
/// cost pays off more easily. It's also more likely for two `Arc` clones to point to the same
/// value than for two `&T`s to do so.
///
/// We can only do this when `T: Eq`, as a `PartialEq` implementation might be deliberately
/// irreflexive.
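///
/// For instance (a small illustration of why the `Eq` bound matters): `f32` is `PartialEq` but
/// not `Eq` because `NaN != NaN`, so the pointer-equality shortcut must not apply to it.
///
/// ```
/// use std::sync::Arc;
///
/// let nan = Arc::new(f32::NAN);
/// // Same allocation on both sides, yet the two must compare unequal.
/// assert!(nan != nan.clone());
/// ```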
3354#[stable(feature = "rust1", since = "1.0.0")]
3355impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3356    #[inline]
3357    fn eq(&self, other: &Arc<T, A>) -> bool {
3358        Arc::ptr_eq(self, other) || **self == **other
3359    }
3360
3361    #[inline]
3362    fn ne(&self, other: &Arc<T, A>) -> bool {
3363        !Arc::ptr_eq(self, other) && **self != **other
3364    }
3365}
3366
3367#[stable(feature = "rust1", since = "1.0.0")]
3368impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3369    /// Equality for two `Arc`s.
3370    ///
    /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
3373    ///
3374    /// If `T` also implements `Eq` (implying reflexivity of equality),
3375    /// two `Arc`s that point to the same allocation are always equal.
3376    ///
3377    /// # Examples
3378    ///
3379    /// ```
3380    /// use std::sync::Arc;
3381    ///
3382    /// let five = Arc::new(5);
3383    ///
3384    /// assert!(five == Arc::new(5));
3385    /// ```
3386    #[inline]
3387    fn eq(&self, other: &Arc<T, A>) -> bool {
3388        ArcEqIdent::eq(self, other)
3389    }
3390
3391    /// Inequality for two `Arc`s.
3392    ///
3393    /// Two `Arc`s are not equal if their inner values are not equal.
3394    ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are never unequal.
3397    ///
3398    /// # Examples
3399    ///
3400    /// ```
3401    /// use std::sync::Arc;
3402    ///
3403    /// let five = Arc::new(5);
3404    ///
3405    /// assert!(five != Arc::new(6));
3406    /// ```
3407    #[inline]
3408    fn ne(&self, other: &Arc<T, A>) -> bool {
3409        ArcEqIdent::ne(self, other)
3410    }
3411}
3412
3413#[stable(feature = "rust1", since = "1.0.0")]
3414impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3415    /// Partial comparison for two `Arc`s.
3416    ///
3417    /// The two are compared by calling `partial_cmp()` on their inner values.
3418    ///
3419    /// # Examples
3420    ///
3421    /// ```
3422    /// use std::sync::Arc;
3423    /// use std::cmp::Ordering;
3424    ///
3425    /// let five = Arc::new(5);
3426    ///
3427    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3428    /// ```
3429    fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3430        (**self).partial_cmp(&**other)
3431    }
3432
3433    /// Less-than comparison for two `Arc`s.
3434    ///
3435    /// The two are compared by calling `<` on their inner values.
3436    ///
3437    /// # Examples
3438    ///
3439    /// ```
3440    /// use std::sync::Arc;
3441    ///
3442    /// let five = Arc::new(5);
3443    ///
3444    /// assert!(five < Arc::new(6));
3445    /// ```
3446    fn lt(&self, other: &Arc<T, A>) -> bool {
3447        *(*self) < *(*other)
3448    }
3449
3450    /// 'Less than or equal to' comparison for two `Arc`s.
3451    ///
3452    /// The two are compared by calling `<=` on their inner values.
3453    ///
3454    /// # Examples
3455    ///
3456    /// ```
3457    /// use std::sync::Arc;
3458    ///
3459    /// let five = Arc::new(5);
3460    ///
3461    /// assert!(five <= Arc::new(5));
3462    /// ```
3463    fn le(&self, other: &Arc<T, A>) -> bool {
3464        *(*self) <= *(*other)
3465    }
3466
3467    /// Greater-than comparison for two `Arc`s.
3468    ///
3469    /// The two are compared by calling `>` on their inner values.
3470    ///
3471    /// # Examples
3472    ///
3473    /// ```
3474    /// use std::sync::Arc;
3475    ///
3476    /// let five = Arc::new(5);
3477    ///
3478    /// assert!(five > Arc::new(4));
3479    /// ```
3480    fn gt(&self, other: &Arc<T, A>) -> bool {
3481        *(*self) > *(*other)
3482    }
3483
3484    /// 'Greater than or equal to' comparison for two `Arc`s.
3485    ///
3486    /// The two are compared by calling `>=` on their inner values.
3487    ///
3488    /// # Examples
3489    ///
3490    /// ```
3491    /// use std::sync::Arc;
3492    ///
3493    /// let five = Arc::new(5);
3494    ///
3495    /// assert!(five >= Arc::new(5));
3496    /// ```
3497    fn ge(&self, other: &Arc<T, A>) -> bool {
3498        *(*self) >= *(*other)
3499    }
3500}

#[stable(feature = "rust1", since = "1.0.0")]
3502impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
3503    /// Comparison for two `Arc`s.
3504    ///
3505    /// The two are compared by calling `cmp()` on their inner values.
3506    ///
3507    /// # Examples
3508    ///
3509    /// ```
3510    /// use std::sync::Arc;
3511    /// use std::cmp::Ordering;
3512    ///
3513    /// let five = Arc::new(5);
3514    ///
3515    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
3516    /// ```
3517    fn cmp(&self, other: &Arc<T, A>) -> Ordering {
3518        (**self).cmp(&**other)
3519    }
3520}

#[stable(feature = "rust1", since = "1.0.0")]
3522impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
3523
3524#[stable(feature = "rust1", since = "1.0.0")]
3525impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
3526    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3527        fmt::Display::fmt(&**self, f)
3528    }
3529}
3530
3531#[stable(feature = "rust1", since = "1.0.0")]
3532impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
3533    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3534        fmt::Debug::fmt(&**self, f)
3535    }
3536}
3537
3538#[stable(feature = "rust1", since = "1.0.0")]
3539impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
3540    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3541        fmt::Pointer::fmt(&(&raw const **self), f)
3542    }
3543}
3544
3545#[cfg(not(no_global_oom_handling))]
3546#[stable(feature = "rust1", since = "1.0.0")]
3547impl<T: Default> Default for Arc<T> {
3548    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
3549    ///
3550    /// # Examples
3551    ///
3552    /// ```
3553    /// use std::sync::Arc;
3554    ///
3555    /// let x: Arc<i32> = Default::default();
3556    /// assert_eq!(*x, 0);
3557    /// ```
3558    fn default() -> Arc<T> {
3559        unsafe {
3560            Self::from_inner(
3561                Box::leak(Box::write(
3562                    Box::new_uninit(),
3563                    ArcInner {
3564                        strong: atomic::AtomicUsize::new(1),
3565                        weak: atomic::AtomicUsize::new(1),
3566                        data: T::default(),
3567                    },
3568                ))
3569                .into(),
3570            )
3571        }
3572    }
3573}
3574
3575/// Struct to hold the static `ArcInner` used for empty `Arc<str/CStr/[T]>` as
3576/// returned by `Default::default`.
3577///
3578/// Layout notes:
3579/// * `repr(align(16))` so we can use it for `[T]` with `align_of::<T>() <= 16`.
3580/// * `repr(C)` so `inner` is at offset 0 (and thus guaranteed to actually be aligned to 16).
3581/// * `[u8; 1]` (to be initialized with 0) so it can be used for `Arc<CStr>`.
3582#[repr(C, align(16))]
3583struct SliceArcInnerForStatic {
3584    inner: ArcInner<[u8; 1]>,
3585}

#[cfg(not(no_global_oom_handling))]
3587const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16;
3588
3589static STATIC_INNER_SLICE: SliceArcInnerForStatic = SliceArcInnerForStatic {
3590    inner: ArcInner {
3591        strong: atomic::AtomicUsize::new(1),
3592        weak: atomic::AtomicUsize::new(1),
3593        data: [0],
3594    },
3595};
3596
3597#[cfg(not(no_global_oom_handling))]
3598#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3599impl Default for Arc<str> {
    /// Creates an empty `str` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
3603    #[inline]
3604    fn default() -> Self {
3605        let arc: Arc<[u8]> = Default::default();
3606        debug_assert!(core::str::from_utf8(&*arc).is_ok());
3607        let (ptr, alloc) = Arc::into_inner_with_allocator(arc);
3608        unsafe { Arc::from_ptr_in(ptr.as_ptr() as *mut ArcInner<str>, alloc) }
3609    }
3610}
3611
3612#[cfg(not(no_global_oom_handling))]
3613#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3614impl Default for Arc<core::ffi::CStr> {
    /// Creates an empty `CStr` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
3618    #[inline]
3619    fn default() -> Self {
3620        use core::ffi::CStr;
3621        let inner: NonNull<ArcInner<[u8]>> = NonNull::from(&STATIC_INNER_SLICE.inner);
3622        let inner: NonNull<ArcInner<CStr>> =
3623            NonNull::new(inner.as_ptr() as *mut ArcInner<CStr>).unwrap();
3624        // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3625        let this: mem::ManuallyDrop<Arc<CStr>> =
3626            unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3627        (*this).clone()
3628    }
3629}
3630
3631#[cfg(not(no_global_oom_handling))]
3632#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3633impl<T> Default for Arc<[T]> {
    /// Creates an empty `[T]` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
3637    #[inline]
3638    fn default() -> Self {
3639        if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
3640            // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
3641            // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
3642            // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
3643            // (Note that NonNull::from(&STATIC_INNER_SLICE.inner) is fine under Tree Borrows.)
3644            let inner: NonNull<SliceArcInnerForStatic> = NonNull::from(&STATIC_INNER_SLICE);
3645            let inner: NonNull<ArcInner<[T; 0]>> = inner.cast();
3646            // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3647            let this: mem::ManuallyDrop<Arc<[T; 0]>> =
3648                unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3649            return (*this).clone();
3650        }
3651
3652        // If T's alignment is too large for the static, make a new unique allocation.
3653        let arr: [T; 0] = [];
3654        Arc::from(arr)
3655    }
3656}
3657
3658#[stable(feature = "rust1", since = "1.0.0")]
3659impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
3660    fn hash<H: Hasher>(&self, state: &mut H) {
3661        (**self).hash(state)
3662    }
3663}
3664
3665#[cfg(not(no_global_oom_handling))]
3666#[stable(feature = "from_for_ptrs", since = "1.6.0")]
3667impl<T> From<T> for Arc<T> {
3668    /// Converts a `T` into an `Arc<T>`
3669    ///
3670    /// The conversion moves the value into a
3671    /// newly allocated `Arc`. It is equivalent to
3672    /// calling `Arc::new(t)`.
3673    ///
3674    /// # Example
3675    /// ```rust
3676    /// # use std::sync::Arc;
3677    /// let x = 5;
3678    /// let arc = Arc::new(5);
3679    ///
3680    /// assert_eq!(Arc::from(x), arc);
3681    /// ```
3682    fn from(t: T) -> Self {
3683        Arc::new(t)
3684    }
3685}
3686
3687#[cfg(not(no_global_oom_handling))]
3688#[stable(feature = "shared_from_array", since = "1.74.0")]
3689impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
3690    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
3691    ///
3692    /// The conversion moves the array into a newly allocated `Arc`.
3693    ///
3694    /// # Example
3695    ///
3696    /// ```
3697    /// # use std::sync::Arc;
3698    /// let original: [i32; 3] = [1, 2, 3];
3699    /// let shared: Arc<[i32]> = Arc::from(original);
3700    /// assert_eq!(&[1, 2, 3], &shared[..]);
3701    /// ```
3702    #[inline]
3703    fn from(v: [T; N]) -> Arc<[T]> {
3704        Arc::<[T; N]>::from(v)
3705    }
3706}
3707
3708#[cfg(not(no_global_oom_handling))]
3709#[stable(feature = "shared_from_slice", since = "1.21.0")]
3710impl<T: Clone> From<&[T]> for Arc<[T]> {
3711    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3712    ///
3713    /// # Example
3714    ///
3715    /// ```
3716    /// # use std::sync::Arc;
3717    /// let original: &[i32] = &[1, 2, 3];
3718    /// let shared: Arc<[i32]> = Arc::from(original);
3719    /// assert_eq!(&[1, 2, 3], &shared[..]);
3720    /// ```
3721    #[inline]
3722    fn from(v: &[T]) -> Arc<[T]> {
3723        <Self as ArcFromSlice<T>>::from_slice(v)
3724    }
3725}
3726
3727#[cfg(not(no_global_oom_handling))]
3728#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3729impl<T: Clone> From<&mut [T]> for Arc<[T]> {
3730    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3731    ///
3732    /// # Example
3733    ///
3734    /// ```
3735    /// # use std::sync::Arc;
3736    /// let mut original = [1, 2, 3];
3737    /// let original: &mut [i32] = &mut original;
3738    /// let shared: Arc<[i32]> = Arc::from(original);
3739    /// assert_eq!(&[1, 2, 3], &shared[..]);
3740    /// ```
3741    #[inline]
3742    fn from(v: &mut [T]) -> Arc<[T]> {
3743        Arc::from(&*v)
3744    }
3745}
3746
3747#[cfg(not(no_global_oom_handling))]
3748#[stable(feature = "shared_from_slice", since = "1.21.0")]
3749impl From<&str> for Arc<str> {
3750    /// Allocates a reference-counted `str` and copies `v` into it.
3751    ///
3752    /// # Example
3753    ///
3754    /// ```
3755    /// # use std::sync::Arc;
3756    /// let shared: Arc<str> = Arc::from("eggplant");
3757    /// assert_eq!("eggplant", &shared[..]);
3758    /// ```
3759    #[inline]
3760    fn from(v: &str) -> Arc<str> {
3761        let arc = Arc::<[u8]>::from(v.as_bytes());
3762        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
3763    }
3764}
3765
3766#[cfg(not(no_global_oom_handling))]
3767#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3768impl From<&mut str> for Arc<str> {
3769    /// Allocates a reference-counted `str` and copies `v` into it.
3770    ///
3771    /// # Example
3772    ///
3773    /// ```
3774    /// # use std::sync::Arc;
3775    /// let mut original = String::from("eggplant");
3776    /// let original: &mut str = &mut original;
3777    /// let shared: Arc<str> = Arc::from(original);
3778    /// assert_eq!("eggplant", &shared[..]);
3779    /// ```
3780    #[inline]
3781    fn from(v: &mut str) -> Arc<str> {
3782        Arc::from(&*v)
3783    }
3784}
3785
3786#[cfg(not(no_global_oom_handling))]
3787#[stable(feature = "shared_from_slice", since = "1.21.0")]
3788impl From<String> for Arc<str> {
3789    /// Allocates a reference-counted `str` and copies `v` into it.
3790    ///
3791    /// # Example
3792    ///
3793    /// ```
3794    /// # use std::sync::Arc;
3795    /// let unique: String = "eggplant".to_owned();
3796    /// let shared: Arc<str> = Arc::from(unique);
3797    /// assert_eq!("eggplant", &shared[..]);
3798    /// ```
3799    #[inline]
3800    fn from(v: String) -> Arc<str> {
3801        Arc::from(&v[..])
3802    }
3803}
3804
3805#[cfg(not(no_global_oom_handling))]
3806#[stable(feature = "shared_from_slice", since = "1.21.0")]
3807impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
    /// Moves a boxed object to a new, reference-counted allocation.
3809    ///
3810    /// # Example
3811    ///
3812    /// ```
3813    /// # use std::sync::Arc;
3814    /// let unique: Box<str> = Box::from("eggplant");
3815    /// let shared: Arc<str> = Arc::from(unique);
3816    /// assert_eq!("eggplant", &shared[..]);
3817    /// ```
3818    #[inline]
3819    fn from(v: Box<T, A>) -> Arc<T, A> {
3820        Arc::from_box_in(v)
3821    }
3822}
3823
3824#[cfg(not(no_global_oom_handling))]
3825#[stable(feature = "shared_from_slice", since = "1.21.0")]
3826impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
3827    /// Allocates a reference-counted slice and moves `v`'s items into it.
3828    ///
3829    /// # Example
3830    ///
3831    /// ```
3832    /// # use std::sync::Arc;
3833    /// let unique: Vec<i32> = vec![1, 2, 3];
3834    /// let shared: Arc<[i32]> = Arc::from(unique);
3835    /// assert_eq!(&[1, 2, 3], &shared[..]);
3836    /// ```
3837    #[inline]
3838    fn from(v: Vec<T, A>) -> Arc<[T], A> {
3839        unsafe {
3840            let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
3841
3842            let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
3843            ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);
3844
3845            // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
3846            // without dropping its contents or the allocator
3847            let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
3848
3849            Self::from_ptr_in(rc_ptr, alloc)
3850        }
3851    }
3852}
3853
3854#[stable(feature = "shared_from_cow", since = "1.45.0")]
3855impl<'a, B> From<Cow<'a, B>> for Arc<B>
3856where
3857    B: ToOwned + ?Sized,
3858    Arc<B>: From<&'a B> + From<B::Owned>,
3859{
3860    /// Creates an atomically reference-counted pointer from a clone-on-write
3861    /// pointer by copying its content.
3862    ///
3863    /// # Example
3864    ///
3865    /// ```rust
3866    /// # use std::sync::Arc;
3867    /// # use std::borrow::Cow;
3868    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
3869    /// let shared: Arc<str> = Arc::from(cow);
3870    /// assert_eq!("eggplant", &shared[..]);
3871    /// ```
3872    #[inline]
3873    fn from(cow: Cow<'a, B>) -> Arc<B> {
3874        match cow {
3875            Cow::Borrowed(s) => Arc::from(s),
3876            Cow::Owned(s) => Arc::from(s),
3877        }
3878    }
3879}
3880
3881#[stable(feature = "shared_from_str", since = "1.62.0")]
3882impl From<Arc<str>> for Arc<[u8]> {
3883    /// Converts an atomically reference-counted string slice into a byte slice.
3884    ///
3885    /// # Example
3886    ///
3887    /// ```
3888    /// # use std::sync::Arc;
3889    /// let string: Arc<str> = Arc::from("eggplant");
3890    /// let bytes: Arc<[u8]> = Arc::from(string);
3891    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
3892    /// ```
3893    #[inline]
3894    fn from(rc: Arc<str>) -> Self {
3895        // SAFETY: `str` has the same layout as `[u8]`.
3896        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
3897    }
3898}
3899
3900#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
3901impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
3902    type Error = Arc<[T], A>;
3903
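    /// Tries to convert an `Arc<[T]>` into an `Arc<[T; N]>` without copying, succeeding only
    /// when the slice's length is exactly `N`; otherwise the original `Arc` is returned as the
    /// error. A minimal sketch of both outcomes:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!([1, 2, 3], *array);
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// assert!(Arc::<[i32; 4]>::try_from(slice).is_err());
    /// ```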
3904    fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
3905        if boxed_slice.len() == N {
3906            let (ptr, alloc) = Arc::into_inner_with_allocator(boxed_slice);
3907            Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
3908        } else {
3909            Err(boxed_slice)
3910        }
3911    }
3912}
3913
3914#[cfg(not(no_global_oom_handling))]
3915#[stable(feature = "shared_from_iter", since = "1.37.0")]
3916impl<T> FromIterator<T> for Arc<[T]> {
3917    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
3918    ///
3919    /// # Performance characteristics
3920    ///
3921    /// ## The general case
3922    ///
3923    /// In the general case, collecting into `Arc<[T]>` is done by first
3924    /// collecting into a `Vec<T>`. That is, when writing the following:
3925    ///
3926    /// ```rust
3927    /// # use std::sync::Arc;
3928    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
3929    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3930    /// ```
3931    ///
3932    /// this behaves as if we wrote:
3933    ///
3934    /// ```rust
3935    /// # use std::sync::Arc;
3936    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
3937    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
3938    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
3939    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3940    /// ```
3941    ///
3942    /// This will allocate as many times as needed for constructing the `Vec<T>`
3943    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
3944    ///
3945    /// ## Iterators of known length
3946    ///
3947    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
3948    /// a single allocation will be made for the `Arc<[T]>`. For example:
3949    ///
3950    /// ```rust
3951    /// # use std::sync::Arc;
3952    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
3953    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
3954    /// ```
3955    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
3956        ToArcSlice::to_arc_slice(iter.into_iter())
3957    }
3958}
3959
3960#[cfg(not(no_global_oom_handling))]
3961/// Specialization trait used for collecting into `Arc<[T]>`.
3962trait ToArcSlice<T>: Iterator<Item = T> + Sized {
3963    fn to_arc_slice(self) -> Arc<[T]>;
3964}
3965
3966#[cfg(not(no_global_oom_handling))]
3967impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
3968    default fn to_arc_slice(self) -> Arc<[T]> {
3969        self.collect::<Vec<T>>().into()
3970    }
3971}
3972
3973#[cfg(not(no_global_oom_handling))]
3974impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
3975    fn to_arc_slice(self) -> Arc<[T]> {
3976        // This is the case for a `TrustedLen` iterator.
3977        let (low, high) = self.size_hint();
3978        if let Some(high) = high {
3979            debug_assert_eq!(
3980                low,
3981                high,
3982                "TrustedLen iterator's size hint is not exact: {:?}",
3983                (low, high)
3984            );
3985
3986            unsafe {
                // SAFETY: `TrustedLen` is an unsafe trait whose contract guarantees that the
                // size hint is exact, so the iterator yields precisely `low` items.
3988                Arc::from_iter_exact(self, low)
3989            }
3990        } else {
3991            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
3992            // length exceeding `usize::MAX`.
3993            // The default implementation would collect into a vec which would panic.
3994            // Thus we panic here immediately without invoking `Vec` code.
3995            panic!("capacity overflow");
3996        }
3997    }
3998}
3999
4000#[stable(feature = "rust1", since = "1.0.0")]
4001impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
4002    fn borrow(&self) -> &T {
4003        &**self
4004    }
4005}
4006
4007#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
4008impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
4009    fn as_ref(&self) -> &T {
4010        &**self
4011    }
4012}
4013
4014#[stable(feature = "pin", since = "1.33.0")]
4015impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}
4016
4017/// Gets the offset within an `ArcInner` for the payload behind a pointer.
4018///
4019/// # Safety
4020///
4021/// The pointer must point to (and have valid metadata for) a previously
4022/// valid instance of T, but the T is allowed to be dropped.
4023unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
4024    // Align the unsized value to the end of the ArcInner.
    // Because ArcInner is repr(C), the data field will always be the last field in memory.
4026    // SAFETY: since the only unsized types possible are slices, trait objects,
4027    // and extern types, the input safety requirement is currently enough to
4028    // satisfy the requirements of align_of_val_raw; this is an implementation
4029    // detail of the language that must not be relied upon outside of std.
4030    unsafe { data_offset_align(align_of_val_raw(ptr)) }
4031}
4032
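/// Computes the payload offset as the size of the `ArcInner` header plus the padding needed to
/// reach `align`. As a worked example (assuming a typical 64-bit target, where the two counters
/// make the header 16 bytes): for `align == 8` the padding is 0 and the offset is 16, while for
/// `align == 32` the padding is 16 and the offset is 32.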
4033#[inline]
4034fn data_offset_align(align: usize) -> usize {
4035    let layout = Layout::new::<ArcInner<()>>();
4036    layout.size() + layout.padding_needed_for(align)
4037}
4038
4039/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
4040/// but will deallocate it (without dropping the value) when dropped.
4041///
4042/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
4043#[cfg(not(no_global_oom_handling))]
4044struct UniqueArcUninit<T: ?Sized, A: Allocator> {
4045    ptr: NonNull<ArcInner<T>>,
4046    layout_for_value: Layout,
4047    alloc: Option<A>,
4048}
4049
4050#[cfg(not(no_global_oom_handling))]
4051impl<T: ?Sized, A: Allocator> UniqueArcUninit<T, A> {
4052    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
4053    fn new(for_value: &T, alloc: A) -> UniqueArcUninit<T, A> {
4054        let layout = Layout::for_value(for_value);
4055        let ptr = unsafe {
4056            Arc::allocate_for_layout(
4057                layout,
4058                |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
4059                |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
4060            )
4061        };
4062        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) }
4063    }
4064
4065    /// Returns the pointer to be written into to initialize the [`Arc`].
4066    fn data_ptr(&mut self) -> *mut T {
4067        let offset = data_offset_align(self.layout_for_value.align());
4068        unsafe { self.ptr.as_ptr().byte_add(offset) as *mut T }
4069    }
4070
4071    /// Upgrade this into a normal [`Arc`].
4072    ///
4073    /// # Safety
4074    ///
4075    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
4076    unsafe fn into_arc(self) -> Arc<T, A> {
4077        let mut this = ManuallyDrop::new(self);
4078        let ptr = this.ptr.as_ptr();
4079        let alloc = this.alloc.take().unwrap();
4080
4081        // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
4082        // for having initialized the data.
4083        unsafe { Arc::from_ptr_in(ptr, alloc) }
4084    }
4085}
4086
4087#[cfg(not(no_global_oom_handling))]
4088impl<T: ?Sized, A: Allocator> Drop for UniqueArcUninit<T, A> {
4089    fn drop(&mut self) {
4090        // SAFETY:
4091        // * new() produced a pointer safe to deallocate.
4092        // * We own the pointer unless into_arc() was called, which forgets us.
4093        unsafe {
4094            self.alloc.take().unwrap().deallocate(
4095                self.ptr.cast(),
4096                arcinner_layout_for_value_layout(self.layout_for_value),
4097            );
4098        }
4099    }
4100}
4101
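/// `Arc<T>` forwards the [`Error`](core::error::Error) methods to its contents, so a cheaply
/// cloneable error can be shared between threads. A minimal sketch (`MyError` is a hypothetical
/// error type used only for illustration):
///
/// ```
/// use std::error::Error;
/// use std::fmt;
/// use std::sync::Arc;
///
/// #[derive(Debug)]
/// struct MyError;
///
/// impl fmt::Display for MyError {
///     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
///         f.write_str("my error")
///     }
/// }
///
/// impl Error for MyError {}
///
/// let shared: Arc<MyError> = Arc::new(MyError);
/// assert!(shared.source().is_none());
/// assert_eq!("my error", shared.to_string());
/// ```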
4102#[stable(feature = "arc_error", since = "1.52.0")]
4103impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
4104    #[allow(deprecated, deprecated_in_future)]
4105    fn description(&self) -> &str {
4106        core::error::Error::description(&**self)
4107    }
4108
4109    #[allow(deprecated)]
4110    fn cause(&self) -> Option<&dyn core::error::Error> {
4111        core::error::Error::cause(&**self)
4112    }
4113
4114    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
4115        core::error::Error::source(&**self)
4116    }
4117
4118    fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
4119        core::error::Error::provide(&**self, req);
4120    }
4121}
4122
4123/// A uniquely owned [`Arc`].
4124///
4125/// This represents an `Arc` that is known to be uniquely owned -- that is, have exactly one strong
4126/// reference. Multiple weak pointers can be created, but attempts to upgrade those to strong
4127/// references will fail unless the `UniqueArc` they point to has been converted into a regular `Arc`.
4128///
4129/// Because it is uniquely owned, the contents of a `UniqueArc` can be freely mutated. A common
4130/// use case is to have an object be mutable during its initialization phase but then have it become
4131/// immutable and converted to a normal `Arc`.
4132///
4133/// This can be used as a flexible way to create cyclic data structures, as in the example below.
4134///
4135/// ```
4136/// #![feature(unique_rc_arc)]
4137/// use std::sync::{Arc, Weak, UniqueArc};
4138///
4139/// struct Gadget {
4140///     me: Weak<Gadget>,
4141/// }
4142///
4143/// fn create_gadget() -> Option<Arc<Gadget>> {
4144///     let mut rc = UniqueArc::new(Gadget {
4145///         me: Weak::new(),
4146///     });
4147///     rc.me = UniqueArc::downgrade(&rc);
4148///     Some(UniqueArc::into_arc(rc))
4149/// }
4150///
4151/// create_gadget().unwrap();
4152/// ```
4153///
4154/// An advantage of using `UniqueArc` over [`Arc::new_cyclic`] to build cyclic data structures is that
4155/// [`Arc::new_cyclic`]'s `data_fn` parameter cannot be async or return a [`Result`]. As shown in the
4156/// previous example, `UniqueArc` allows for more flexibility in the construction of cyclic data,
4157/// including fallible or async constructors.
4158#[unstable(feature = "unique_rc_arc", issue = "112566")]
4159pub struct UniqueArc<
4160    T: ?Sized,
4161    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
4162> {
4163    ptr: NonNull<ArcInner<T>>,
4164    // Define the ownership of `ArcInner<T>` for drop-check
4165    _marker: PhantomData<ArcInner<T>>,
4166    // Invariance is necessary for soundness: once other `Weak`
4167    // references exist, we already have a form of shared mutability!
4168    _marker2: PhantomData<*mut T>,
4169    alloc: A,
4170}
4171
4172#[unstable(feature = "unique_rc_arc", issue = "112566")]
4173unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for UniqueArc<T, A> {}
4174
4175#[unstable(feature = "unique_rc_arc", issue = "112566")]
4176unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for UniqueArc<T, A> {}
4177
4178#[unstable(feature = "unique_rc_arc", issue = "112566")]
4179// #[unstable(feature = "coerce_unsized", issue = "18598")]
4180impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<UniqueArc<U, A>>
4181    for UniqueArc<T, A>
4182{
4183}
4184
4185//#[unstable(feature = "unique_rc_arc", issue = "112566")]
4186#[unstable(feature = "dispatch_from_dyn", issue = "none")]
4187impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<UniqueArc<U>> for UniqueArc<T> {}
4188
4189#[unstable(feature = "unique_rc_arc", issue = "112566")]
4190impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for UniqueArc<T, A> {
4191    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4192        fmt::Display::fmt(&**self, f)
4193    }
4194}
4195
4196#[unstable(feature = "unique_rc_arc", issue = "112566")]
4197impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for UniqueArc<T, A> {
4198    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4199        fmt::Debug::fmt(&**self, f)
4200    }
4201}
4202
4203#[unstable(feature = "unique_rc_arc", issue = "112566")]
4204impl<T: ?Sized, A: Allocator> fmt::Pointer for UniqueArc<T, A> {
4205    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4206        fmt::Pointer::fmt(&(&raw const **self), f)
4207    }
4208}
4209
4210#[unstable(feature = "unique_rc_arc", issue = "112566")]
4211impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for UniqueArc<T, A> {
4212    fn borrow(&self) -> &T {
4213        &**self
4214    }
4215}
4216
4217#[unstable(feature = "unique_rc_arc", issue = "112566")]
4218impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for UniqueArc<T, A> {
4219    fn borrow_mut(&mut self) -> &mut T {
4220        &mut **self
4221    }
4222}
4223
4224#[unstable(feature = "unique_rc_arc", issue = "112566")]
4225impl<T: ?Sized, A: Allocator> AsRef<T> for UniqueArc<T, A> {
4226    fn as_ref(&self) -> &T {
4227        &**self
4228    }
4229}
4230
4231#[unstable(feature = "unique_rc_arc", issue = "112566")]
4232impl<T: ?Sized, A: Allocator> AsMut<T> for UniqueArc<T, A> {
4233    fn as_mut(&mut self) -> &mut T {
4234        &mut **self
4235    }
4236}
4237
#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Unpin for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for UniqueArc<T, A> {
    /// Equality for two `UniqueArc`s.
    ///
    /// Two `UniqueArc`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five == UniqueArc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&**self, &**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for UniqueArc<T, A> {
    /// Partial comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&UniqueArc::new(6)));
    /// ```
    #[inline(always)]
    fn partial_cmp(&self, other: &UniqueArc<T, A>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five < UniqueArc::new(6));
    /// ```
    #[inline(always)]
    fn lt(&self, other: &UniqueArc<T, A>) -> bool {
        **self < **other
    }

    /// 'Less than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five <= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn le(&self, other: &UniqueArc<T, A>) -> bool {
        **self <= **other
    }

    /// Greater-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five > UniqueArc::new(4));
    /// ```
    #[inline(always)]
    fn gt(&self, other: &UniqueArc<T, A>) -> bool {
        **self > **other
    }

    /// 'Greater than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five >= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn ge(&self, other: &UniqueArc<T, A>) -> bool {
        **self >= **other
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Ord, A: Allocator> Ord for UniqueArc<T, A> {
    /// Comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&UniqueArc::new(6)));
    /// ```
    #[inline]
    fn cmp(&self, other: &UniqueArc<T, A>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Eq, A: Allocator> Eq for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Hash, A: Allocator> Hash for UniqueArc<T, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}

impl<T> UniqueArc<T, Global> {
    /// Creates a new `UniqueArc`.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
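    ///
    /// # Examples
    ///
    /// A minimal sketch of creating a `UniqueArc` and reading the value back:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(*five, 5);
    /// ```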
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn new(value: T) -> Self {
        Self::new_in(value, Global)
    }
}

impl<T, A: Allocator> UniqueArc<T, A> {
    /// Creates a new `UniqueArc` in the provided allocator.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
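    ///
    /// # Examples
    ///
    /// A sketch using the `System` allocator (this additionally requires the unstable
    /// `allocator_api` feature):
    ///
    /// ```
    /// #![feature(unique_rc_arc, allocator_api)]
    /// use std::sync::UniqueArc;
    /// use std::alloc::System;
    ///
    /// let five = UniqueArc::new_in(5, System);
    ///
    /// assert_eq!(*five, 5);
    /// ```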
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    // #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Self {
        let (ptr, alloc) = Box::into_unique(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                // Hold one implicit weak reference so the allocation stays valid
                // even if every `Weak` created from this `UniqueArc` is dropped.
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        ));
        Self { ptr: ptr.into(), _marker: PhantomData, _marker2: PhantomData, alloc }
    }
}

impl<T: ?Sized, A: Allocator> UniqueArc<T, A> {
    /// Converts the `UniqueArc` into a regular [`Arc`].
    ///
    /// This consumes the `UniqueArc` and returns a regular [`Arc`] containing the value previously
    /// held by the `UniqueArc`.
    ///
    /// Any weak references created before this method is called can now be upgraded to strong
    /// references.
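    ///
    /// # Examples
    ///
    /// A sketch of the full conversion cycle, showing that weak references become
    /// upgradable only after the conversion:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    ///
    /// // Upgrading fails while the value is still uniquely owned.
    /// assert!(weak.upgrade().is_none());
    ///
    /// let shared = UniqueArc::into_arc(unique);
    /// assert_eq!(*shared, 5);
    ///
    /// // The weak reference now points to the new `Arc` and can be upgraded.
    /// assert_eq!(*weak.upgrade().unwrap(), 5);
    /// ```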
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn into_arc(this: Self) -> Arc<T, A> {
        let this = ManuallyDrop::new(this);

        // Move the allocator out.
        // SAFETY: `this.alloc` will not be accessed again, nor dropped, because it is in
        // a `ManuallyDrop`.
        let alloc: A = unsafe { ptr::read(&this.alloc) };

        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe {
            // Convert our weak reference into a strong reference.
            (*this.ptr.as_ptr()).strong.store(1, Release);
            Arc::from_inner_in(this.ptr, alloc)
        }
    }
}

impl<T: ?Sized, A: Allocator + Clone> UniqueArc<T, A> {
    /// Creates a new weak reference to the `UniqueArc`.
    ///
    /// Attempting to upgrade this weak reference will fail before the `UniqueArc` has been
    /// converted to an [`Arc`] using [`UniqueArc::into_arc`].
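    ///
    /// # Examples
    ///
    /// A sketch showing that upgrading fails while ownership is still unique:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    ///
    /// assert!(weak.upgrade().is_none());
    /// ```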
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn downgrade(this: &Self) -> Weak<T, A> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object or converting the object to a normal `Arc<T, A>`.
        //
        // Note that we don't need to test whether the weak counter is locked, because
        // no operation like `Arc::get_mut` or `Arc::make_mut` exists for `UniqueArc`
        // that would lock the weak counter.
        //
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let old_size = unsafe { (*this.ptr.as_ptr()).weak.fetch_add(1, Relaxed) };

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            abort();
        }

        Weak { ptr: this.ptr, alloc: this.alloc.clone() }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Deref for UniqueArc<T, A> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe { &self.ptr.as_ref().data }
    }
}

// #[unstable(feature = "unique_rc_arc", issue = "112566")]
#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
unsafe impl<T: ?Sized> PinCoerceUnsized for UniqueArc<T> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> DerefMut for UniqueArc<T, A> {
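    /// Mutably dereferences the `UniqueArc`.
    ///
    /// Because a `UniqueArc` holds the only strong reference, the value can be
    /// mutated in place without locks or `make_mut`-style cloning; a short sketch:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let mut five = UniqueArc::new(5);
    /// *five += 1;
    ///
    /// assert_eq!(*five, 6);
    /// ```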
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid. We know we
        // have unique ownership and therefore it's safe to make a mutable reference because
        // `UniqueArc` owns the only strong reference to itself.
        // We also need to be careful to only create a mutable reference to the `data` field,
        // as a mutable reference to the entire `ArcInner` would assert uniqueness over the
        // ref count fields too, invalidating any attempt by `Weak`s to access the ref count.
        unsafe { &mut (*self.ptr.as_ptr()).data }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
// #[unstable(feature = "deref_pure_trait", issue = "87121")]
unsafe impl<T: ?Sized, A: Allocator> DerefPure for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for UniqueArc<T, A> {
    fn drop(&mut self) {
        // See `Arc::drop_slow` which drops an `Arc` with a strong count of 0.
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
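        // When `_weak` is dropped at the end of this scope, it releases the implicit
        // weak reference created in `new_in`, deallocating the `ArcInner` once no
        // other `Weak` references remain.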

        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
    }
}