#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.

use core::any::Any;
#[cfg(not(no_global_oom_handling))]
use core::clone::CloneToUninit;
use core::clone::UseCloned;
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unsize};
use core::mem::{self, ManuallyDrop, align_of_val_raw};
use core::num::NonZeroUsize;
use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::pin::{Pin, PinCoerceUnsized};
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::sync::atomic::{self, Atomic};
use core::{borrow, fmt, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{AllocError, Allocator, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not necessarily
/// at _exactly_ `MAX_REFCOUNT + 1` references.
/// Trying to go above it might `panic` even before the count actually exceeds it.
///
/// This is a global invariant, and also applies when using a compare-exchange loop.
///
/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
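
// A sketch of how this limit is enforced in `Arc::clone` (see that method's
// comment for the full reasoning): the count is bumped with a relaxed
// `fetch_add` first, and the process aborts if the old value had already
// passed the limit:
//
//     let old_size = self.inner().strong.fetch_add(1, Relaxed);
//     if old_size > MAX_REFCOUNT {
//         abort();
//     }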

/// The error message used when either counter exceeds `MAX_REFCOUNT`, in contexts where we can `panic` safely.
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";

#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false positive
// reports in Arc / Weak implementation use atomic loads for synchronization
// instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}
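
// A sketch of how `acquire!` pairs with the reference-count decrements
// (mirroring the `Drop` implementation): `strong` is decremented with
// `Release`, and the thread that observes the count hit zero runs `acquire!`
// before touching the data, synchronizing with every earlier drop:
//
//     if self.inner().strong.fetch_sub(1, Release) != 1 {
//         return;
//     }
//     acquire!(self.inner().strong);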

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you do need to mutate through an `Arc`, you have several options:
///
/// 1. Use interior mutability with synchronization primitives like [`Mutex`][mutex],
///    [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
///
/// 2. Use clone-on-write semantics with [`Arc::make_mut`] which provides efficient mutation
///    without requiring interior mutability. This approach clones the data only when
///    needed (when there are multiple references) and can be more efficient when mutations
///    are infrequent.
///
/// 3. Use [`Arc::get_mut`] when you know your `Arc` is not shared (has a reference count of 1),
///    which provides direct mutable access to the inner value without any cloning.
///
/// ```
/// use std::sync::Arc;
///
/// let mut data = Arc::new(vec![1, 2, 3]);
///
/// // This will clone the vector only if there are other references to it
/// Arc::make_mut(&mut data).push(4);
///
/// assert_eq!(*data, vec![1, 2, 3, 4]);
/// ```
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers, which includes all platforms that support
/// the `std` crate but not all those which only support [`alloc`](crate).
/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` were always
/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
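///
/// For example, a counter shared between threads can be wrapped in a
/// [`Mutex<T>`][mutex] (a minimal sketch; the names are illustrative):
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
///
/// let handles: Vec<_> = (0..5)
///     .map(|_| {
///         let counter = Arc::clone(&counter);
///         thread::spawn(move || {
///             *counter.lock().unwrap() += 1;
///         })
///     })
///     .collect();
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 5);
/// ```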
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
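///
/// A minimal sketch of that parent/child pattern (the `Node` type here is
/// illustrative, not part of the API):
///
/// ```
/// use std::sync::{Arc, Weak};
///
/// struct Node {
///     parent: Weak<Node>,
///     value: u32,
/// }
///
/// let parent = Arc::new(Node { parent: Weak::new(), value: 1 });
/// let child = Arc::new(Node { parent: Arc::downgrade(&parent), value: 2 });
///
/// // The child's back-pointer does not keep the parent alive.
/// assert_eq!(child.parent.upgrade().unwrap().value, 1);
/// drop(parent);
/// assert!(child.parent.upgrade().is_none());
/// ```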
///
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(());
/// let my_weak = Arc::downgrade(&my_arc);
/// ```
///
/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
/// fully qualified syntax. Some people prefer to use fully qualified syntax,
/// while others prefer using method-call syntax.
///
/// ```
/// use std::sync::Arc;
///
/// let arc = Arc::new(());
/// // Method-call syntax
/// let arc2 = arc.clone();
/// // Fully qualified syntax
/// let arc3 = Arc::clone(&arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
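/// A short sketch of the difference: a `Weak` must be upgraded before the
/// value can be reached.
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(42);
/// let weak = Arc::downgrade(&strong);
///
/// // `*weak` would not compile; upgrade to a (temporary) `Arc` first.
/// assert_eq!(*weak.upgrade().unwrap(), 42);
/// ```
///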
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::Relaxed);
///         println!("{v:?}");
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[doc(search_unbox)]
#[rustc_diagnostic_item = "Arc"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct Arc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}

#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
}

impl<T: ?Sized, A: Allocator> Arc<T, A> {
    #[inline]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, phantom: PhantomData, alloc }
    }

    #[inline]
    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation.
///
/// The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
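///
/// For example (a minimal sketch):
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
/// let weak_five = Arc::downgrade(&five);
///
/// assert_eq!(weak_five.upgrade().as_deref(), Some(&5));
/// drop(five);
/// assert!(weak_five.upgrade().is_none());
/// ```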
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
#[rustc_diagnostic_item = "ArcWeak"]
pub struct Weak<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because ArcInner has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

// This is repr(C) to future-proof against possible field-reordering, which
// would interfere with otherwise safe [into|from]_raw() of transmutable
// inner types.
#[repr(C)]
struct ArcInner<T: ?Sized> {
    strong: Atomic<usize>,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: Atomic<usize>,

    data: T,
}

/// Calculate layout for `ArcInner<T>` using the inner value's layout
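///
/// For example, on a typical 64-bit target `ArcInner<()>` has size 16 (the
/// two `AtomicUsize` counters) and alignment 8, so for `T = u64` this returns
/// a layout with size 24 and alignment 8: the counters followed by the value.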
fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
    // Calculate layout using the given value layout.
    // Previously, layout was calculated on the expression
    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
    // reference (see #54908).
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        });
        unsafe { Self::from_inner(Box::leak(x).into()) }
    }

    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
    /// then calls your closure, giving it a `Weak<T>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// ```
    /// # #![allow(dead_code)]
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Gadget {
    ///     me: Weak<Gadget>,
    /// }
    ///
    /// impl Gadget {
    ///     /// Constructs a reference counted Gadget.
    ///     fn new() -> Arc<Self> {
    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
    ///         // `Arc` we're constructing.
    ///         Arc::new_cyclic(|me| {
    ///             // Create the actual struct here.
    ///             Gadget { me: me.clone() }
    ///         })
    ///     }
    ///
    ///     /// Returns a reference counted pointer to Self.
    ///     fn me(&self) -> Arc<Self> {
    ///         self.me.upgrade().unwrap()
    ///     }
    /// }
    /// ```
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
    where
        F: FnOnce(&Weak<T>) -> T,
    {
        Self::new_cyclic_in(data_fn, Global)
    }

    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_zeroed_alloc)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::new_zeroed();
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "new_zeroed_alloc", issue = "129396")]
    #[must_use]
    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
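    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```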
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "pin", since = "1.33.0")]
    #[must_use]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
    }

    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_new(5)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::try_new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        })?;
        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
    }

    /// Constructs a new `Arc` with uninitialized contents, returning an error
    /// if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::try_new_uninit()?;
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, returning an error if allocation fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::try_new_zeroed()?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            )?))
        }
    }
}

impl<T, A: Allocator> Arc<T, A> {
    /// Constructs a new `Arc<T>` in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::new_in(5, System);
    /// ```
    #[inline]
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        );
        let (ptr, alloc) = Box::into_unique(x);
        unsafe { Self::from_inner_in(ptr.into(), alloc) }
    }

    /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::new_uninit_in(System);
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::new_zeroed_in(System);
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T, A>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
    /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T, A>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// See [`new_cyclic`]
    ///
    /// [`new_cyclic`]: Arc::new_cyclic
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
    where
        F: FnOnce(&Weak<T, A>) -> T,
    {
        // Construct the inner in the "uninitialized" state with a single
        // weak reference.
        let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                weak: atomic::AtomicUsize::new(1),
                data: mem::MaybeUninit::<T>::uninit(),
            },
            alloc,
        ));
        let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();

        let weak = Weak { ptr: init_ptr, alloc };

        // It's important we don't give up ownership of the weak pointer, or
        // else the memory might be freed by the time `data_fn` returns. If
        // we really wanted to pass ownership, we could create an additional
        // weak pointer for ourselves, but this would result in additional
        // updates to the weak reference count which might not be necessary
        // otherwise.
        let data = data_fn(&weak);

        // Now we can properly initialize the inner value and turn our weak
        // reference into a strong reference.
        let strong = unsafe {
            let inner = init_ptr.as_ptr();
            ptr::write(&raw mut (*inner).data, data);

            // The above write to the data field must be visible to any threads which
            // observe a non-zero strong count. Therefore we need at least "Release" ordering
            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
            //
            // "Acquire" ordering is not required. When considering the possible behaviors
            // of `data_fn` we only need to look at what it could do with a reference to a
            // non-upgradeable `Weak`:
            // - It can *clone* the `Weak`, increasing the weak reference count.
            // - It can drop those clones, decreasing the weak reference count (but never to zero).
            //
            // These side effects do not impact us in any way, and no other side effects are
            // possible with safe code alone.
            let prev_value = (*inner).strong.fetch_add(1, Release);
            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");

            // Strong references should collectively own a shared weak reference,
            // so don't run the destructor for our old weak reference.
            // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
            // and forgetting the weak reference.
            let alloc = weak.into_raw_with_allocator().1;

            Arc::from_inner_in(init_ptr, alloc)
        };

        strong
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
    /// then `data` will be pinned in memory and unable to be moved.
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
    where
        A: 'static,
    {
        unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if
    /// allocation fails.
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
    where
        A: 'static,
    {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
    }

    /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::try_new_in(5, System)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::try_new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        )?;
        let (ptr, alloc) = Box::into_unique(x);
        Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
    }

    /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
    /// error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
    /// fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    // #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
    /// keep the `Arc` in the [`Err`] case.
    /// Immediately dropping the [`Err`]-value, as the expression
    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
    /// drop to zero and the inner value of the `Arc` to be dropped.
    /// For instance, if two threads execute such an expression in parallel,
    /// there is a race condition without the possibility of unsafety:
    /// The threads could first both check whether they own the last instance
    /// in `Arc::try_unwrap`, determine that they both do not, and then both
    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
    /// In this scenario, the value inside the `Arc` is safely destroyed
    /// by exactly one of the threads, but neither thread will ever be able
    /// to use the value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        let this = ManuallyDrop::new(this);
        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
        let alloc: A = unsafe { ptr::read(&this.alloc) }; // copy the allocator

        // Make a weak pointer to clean up the implicit strong-weak reference
        let _weak = Weak { ptr: this.ptr, alloc };

        Ok(elem)
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// If `Arc::into_inner` is called on every clone of this `Arc`,
    /// it is guaranteed that exactly one of the calls returns the inner value.
    /// This means in particular that the inner value is not dropped.
    ///
    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
    /// is meant for different use-cases. If used as a direct replacement
    /// for `Arc::into_inner` anyway, such as with the expression
    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
    /// **not** give the same guarantee as described in the previous paragraph.
    /// For more information, see the examples below and read the documentation
    /// of [`Arc::try_unwrap`].
    ///
    /// # Examples
    ///
    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
    ///
    /// let x_inner_value = x_thread.join().unwrap();
    /// let y_inner_value = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!(matches!(
    ///     (x_inner_value, y_inner_value),
    ///     (None, Some(3)) | (Some(3), None)
    /// ));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
    /// ```
    ///
    /// A more practical example demonstrating the need for `Arc::into_inner`:
    /// ```
    /// use std::sync::Arc;
    ///
    /// // Definition of a simple singly linked list using `Arc`:
    /// #[derive(Clone)]
    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
    /// struct Node<T>(T, Option<Arc<Node<T>>>);
    ///
    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
    /// // can cause a stack overflow. To prevent this, we can provide a
    /// // manual `Drop` implementation that does the destruction in a loop:
    /// impl<T> Drop for LinkedList<T> {
    ///     fn drop(&mut self) {
    ///         let mut link = self.0.take();
    ///         while let Some(arc_node) = link.take() {
    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
    ///                 link = next;
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Implementation of `new` and `push` omitted
    /// impl<T> LinkedList<T> {
    ///     /* ... */
    /// #   fn new() -> Self {
    /// #       LinkedList(None)
    /// #   }
    /// #   fn push(&mut self, x: T) {
    /// #       self.0 = Some(Arc::new(Node(x, self.0.take())));
    /// #   }
    /// }
    ///
    /// // The following code could have still caused a stack overflow
    /// // despite the manual `Drop` impl if that `Drop` impl had used
    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
    ///
    /// // Create a long list and clone it
    /// let mut x = LinkedList::new();
    /// let size = 100000;
    /// # let size = if cfg!(miri) { 100 } else { size };
    /// for i in 0..size {
    ///     x.push(i); // Adds i to the front of x
    /// }
    /// let y = x.clone();
    ///
    /// // Drop the clones in parallel
    /// let x_thread = std::thread::spawn(|| drop(x));
    /// let y_thread = std::thread::spawn(|| drop(y));
    /// x_thread.join().unwrap();
    /// y_thread.join().unwrap();
    /// ```
    #[inline]
    #[stable(feature = "arc_into_inner", since = "1.70.0")]
    pub fn into_inner(this: Self) -> Option<T> {
        // Make sure that the ordinary `Drop` implementation isn’t called as well
        let mut this = mem::ManuallyDrop::new(this);

        // Following the implementation of `drop` and `drop_slow`
        if this.inner().strong.fetch_sub(1, Release) != 1 {
            return None;
        }

        acquire!(this.inner().strong);

        // SAFETY: This mirrors the line
        //
        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
        //
        // in `drop_slow`. Instead of dropping the value behind the pointer,
        // it is read and eventually returned; `ptr::read` has the same
        // safety conditions as `ptr::drop_in_place`.

        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
        let alloc = unsafe { ptr::read(&this.alloc) };

        drop(Weak { ptr: this.ptr, alloc });

        Some(inner)
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_zeroed_alloc)]
    ///
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "new_zeroed_alloc", issue = "129396")]
    #[must_use]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }

    /// Converts the reference-counted slice into a reference-counted array.
    ///
    /// This operation does not reallocate; the underlying array of the slice is simply reinterpreted as an array type.
    ///
    /// If `N` is not exactly equal to the length of `self`, then this method returns `None`.
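    ///
    /// # Examples
    ///
    /// A minimal usage sketch (this method is unstable, behind the
    /// `slice_as_array` feature):
    ///
    /// ```
    /// #![feature(slice_as_array)]
    ///
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[u32]> = Arc::new([1, 2, 3]);
    /// let array: Arc<[u32; 3]> = slice.into_array().unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```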
    #[unstable(feature = "slice_as_array", issue = "133508")]
    #[inline]
    #[must_use]
    pub fn into_array<const N: usize>(self) -> Option<Arc<[T; N]>> {
        if self.len() == N {
            let ptr = Self::into_raw(self) as *const [T; N];

            // SAFETY: The underlying array of a slice has the exact same layout as an actual array `[T; N]` if `N` is equal to the slice's length.
            let me = unsafe { Arc::from_raw(ptr) };
            Some(me)
        } else {
            None
        }
    }
}

impl<T, A: Allocator> Arc<[T], A> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents in the
    /// provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::array::<T>(len).unwrap(),
                    |layout| alloc.allocate_zeroed(layout),
                    |mem| {
                        ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
                            as *mut ArcInner<[mem::MaybeUninit<T>]>
                    },
                ),
                alloc,
            )
        }
    }
}

impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T, A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
    }
}

impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T], A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Constructs an `Arc<T>` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
    ///
    /// * If `U` is sized, it must have the same size and alignment as `T`. This
    ///   is trivially true if `U` is `T`.
    /// * If `U` is unsized, its data pointer must have the same size and
    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
    ///   coercion].
    ///
    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
    /// and alignment, this is basically like transmuting references of
    /// different types. See [`mem::transmute`][transmute] for more information
    /// on what restrictions apply in this case.
    ///
    /// The raw pointer must point to a block of memory allocated by the global allocator.
    ///
    /// The user of `from_raw` has to make sure a specific value of `T` is only
    /// dropped once.
    ///
    /// This function is unsafe because improper use may lead to memory unsafety,
    /// even if the returned `Arc<T>` is never accessed.
    ///
    /// [into_raw]: Arc::into_raw
    /// [transmute]: core::mem::transmute
    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    ///
    /// Convert a slice back into its original array:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
    /// let x_ptr: *const [u32] = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
    ///     assert_eq!(&*x, &[1, 2, 3]);
    /// }
    /// ```
    #[inline]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        unsafe { Arc::from_raw_in(ptr, Global) }
    }

    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// # // Prevent leaks for Miri.
    /// # drop(unsafe { Arc::from_raw(x_ptr) });
    /// ```
    #[must_use = "losing the pointer will leak memory"]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    #[rustc_never_returns_null_ptr]
    pub fn into_raw(this: Self) -> *const T {
        let this = ManuallyDrop::new(this);
        Self::as_ptr(&*this)
    }

    /// Increments the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
    /// allocated by the global allocator.
    ///
    /// [from_raw_in]: Arc::from_raw_in
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::increment_strong_count(ptr);
    ///
    ///     // This assertion is deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    /// #   // Prevent leaks for Miri.
    /// #   Arc::decrement_strong_count(ptr);
    /// }
    /// ```
    #[inline]
    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
    pub unsafe fn increment_strong_count(ptr: *const T) {
        unsafe { Arc::increment_strong_count_in(ptr, Global) }
    }

    /// Decrements the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) when invoking this method, and `ptr` must point to a block of memory
1541    /// allocated by the global allocator. This method can be used to release the final
1542    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1543    /// released.
1544    ///
1545    /// [from_raw_in]: Arc::from_raw_in
1546    ///
1547    /// # Examples
1548    ///
1549    /// ```
1550    /// use std::sync::Arc;
1551    ///
1552    /// let five = Arc::new(5);
1553    ///
1554    /// unsafe {
1555    ///     let ptr = Arc::into_raw(five);
1556    ///     Arc::increment_strong_count(ptr);
1557    ///
1558    ///     // These assertions are deterministic because we haven't shared
1559    ///     // the `Arc` between threads.
1560    ///     let five = Arc::from_raw(ptr);
1561    ///     assert_eq!(2, Arc::strong_count(&five));
1562    ///     Arc::decrement_strong_count(ptr);
1563    ///     assert_eq!(1, Arc::strong_count(&five));
1564    /// }
1565    /// ```
1566    #[inline]
1567    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1568    pub unsafe fn decrement_strong_count(ptr: *const T) {
1569        unsafe { Arc::decrement_strong_count_in(ptr, Global) }
1570    }
1571}
1572
1573impl<T: ?Sized, A: Allocator> Arc<T, A> {
1574    /// Returns a reference to the underlying allocator.
1575    ///
1576    /// Note: this is an associated function, which means that you have
1577    /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
1578    /// is so that there is no conflict with a method on the inner type.
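    ///
    /// # Examples
    ///
    /// A minimal sketch of the calling convention described above (the
    /// unstable `allocator_api` feature and `System` are used purely for
    /// illustration):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::sync::Arc;
    ///
    /// let a = Arc::new_in(5, System);
    /// // Called as an associated function, not as `a.allocator()`.
    /// let _alloc: &System = Arc::allocator(&a);
    /// ```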
1579    #[inline]
1580    #[unstable(feature = "allocator_api", issue = "32838")]
1581    pub fn allocator(this: &Self) -> &A {
1582        &this.alloc
1583    }
1584
1585    /// Consumes the `Arc`, returning the wrapped pointer and allocator.
1586    ///
1587    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1588    /// [`Arc::from_raw_in`].
1589    ///
1590    /// # Examples
1591    ///
1592    /// ```
1593    /// #![feature(allocator_api)]
1594    /// use std::sync::Arc;
1595    /// use std::alloc::System;
1596    ///
1597    /// let x = Arc::new_in("hello".to_owned(), System);
1598    /// let (ptr, alloc) = Arc::into_raw_with_allocator(x);
1599    /// assert_eq!(unsafe { &*ptr }, "hello");
1600    /// let x = unsafe { Arc::from_raw_in(ptr, alloc) };
1601    /// assert_eq!(&*x, "hello");
1602    /// ```
1603    #[must_use = "losing the pointer will leak memory"]
1604    #[unstable(feature = "allocator_api", issue = "32838")]
1605    pub fn into_raw_with_allocator(this: Self) -> (*const T, A) {
1606        let this = mem::ManuallyDrop::new(this);
1607        let ptr = Self::as_ptr(&this);
1608        // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
1609        let alloc = unsafe { ptr::read(&this.alloc) };
1610        (ptr, alloc)
1611    }
1612
1613    /// Provides a raw pointer to the data.
1614    ///
1615    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
1616    /// as long as there are strong references to the allocation.
1617    ///
1618    /// # Examples
1619    ///
1620    /// ```
1621    /// use std::sync::Arc;
1622    ///
1623    /// let x = Arc::new("hello".to_owned());
1624    /// let y = Arc::clone(&x);
1625    /// let x_ptr = Arc::as_ptr(&x);
1626    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1627    /// assert_eq!(unsafe { &*x_ptr }, "hello");
1628    /// ```
1629    #[must_use]
1630    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1631    #[rustc_never_returns_null_ptr]
1632    pub fn as_ptr(this: &Self) -> *const T {
1633        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1634
1635        // SAFETY: This cannot go through Deref::deref or RcInnerPtr::inner because
1636        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
1637        // write through the pointer after the Arc is recovered through `from_raw`.
1638        unsafe { &raw mut (*ptr).data }
1639    }
1640
1641    /// Constructs an `Arc<T, A>` from a raw pointer.
1642    ///
1643    /// The raw pointer must have been previously returned by a call to [`Arc<U,
1644    /// A>::into_raw`][into_raw] with the following requirements:
1645    ///
1646    /// * If `U` is sized, it must have the same size and alignment as `T`. This
1647    ///   is trivially true if `U` is `T`.
1648    /// * If `U` is unsized, its data pointer must have the same size and
1649    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
1650    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1651    ///   coercion].
1652    ///
1653    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1654    /// and alignment, this is basically like transmuting references of
1655    /// different types. See [`mem::transmute`][transmute] for more information
1656    /// on what restrictions apply in this case.
1657    ///
1658    /// The raw pointer must point to a block of memory allocated by `alloc`.
1659    ///
1660    /// The user of `from_raw` has to make sure a specific value of `T` is only
1661    /// dropped once.
1662    ///
1663    /// This function is unsafe because improper use may lead to memory unsafety,
1664    /// even if the returned `Arc<T>` is never accessed.
1665    ///
1666    /// [into_raw]: Arc::into_raw
1667    /// [transmute]: core::mem::transmute
1668    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1669    ///
1670    /// # Examples
1671    ///
1672    /// ```
1673    /// #![feature(allocator_api)]
1674    ///
1675    /// use std::sync::Arc;
1676    /// use std::alloc::System;
1677    ///
1678    /// let x = Arc::new_in("hello".to_owned(), System);
1679    /// let (x_ptr, alloc) = Arc::into_raw_with_allocator(x);
1680    ///
1681    /// unsafe {
1682    ///     // Convert back to an `Arc` to prevent a leak.
1683    ///     let x = Arc::from_raw_in(x_ptr, System);
1684    ///     assert_eq!(&*x, "hello");
1685    ///
1686    ///     // Further calls to `Arc::from_raw_in(x_ptr, System)` would be memory-unsafe.
1687    /// }
1688    ///
1689    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1690    /// ```
1691    ///
1692    /// Convert a slice back into its original array:
1693    ///
1694    /// ```
1695    /// #![feature(allocator_api)]
1696    ///
1697    /// use std::sync::Arc;
1698    /// use std::alloc::System;
1699    ///
1700    /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1701    /// let x_ptr: *const [u32] = Arc::into_raw_with_allocator(x).0;
1702    ///
1703    /// unsafe {
1704    ///     let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1705    ///     assert_eq!(&*x, &[1, 2, 3]);
1706    /// }
1707    /// ```
1708    #[inline]
1709    #[unstable(feature = "allocator_api", issue = "32838")]
1710    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1711        unsafe {
1712            let offset = data_offset(ptr);
1713
1714            // Reverse the offset to find the original ArcInner.
1715            let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1716
1717            Self::from_ptr_in(arc_ptr, alloc)
1718        }
1719    }
1720
1721    /// Creates a new [`Weak`] pointer to this allocation.
1722    ///
1723    /// # Examples
1724    ///
1725    /// ```
1726    /// use std::sync::Arc;
1727    ///
1728    /// let five = Arc::new(5);
1729    ///
1730    /// let weak_five = Arc::downgrade(&five);
1731    /// ```
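    ///
    /// The returned [`Weak`] does not keep the value alive: it can be
    /// [upgraded](Weak::upgrade) back into an `Arc` only while at least one
    /// strong pointer still exists. A short sketch of that round trip:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// assert_eq!(weak_five.upgrade().as_deref(), Some(&5));
    /// drop(five);
    /// assert!(weak_five.upgrade().is_none());
    /// ```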
1732    #[must_use = "this returns a new `Weak` pointer, \
1733                  without modifying the original `Arc`"]
1734    #[stable(feature = "arc_weak", since = "1.4.0")]
1735    pub fn downgrade(this: &Self) -> Weak<T, A>
1736    where
1737        A: Clone,
1738    {
1739        // This Relaxed is OK because we're checking the value in the CAS
1740        // below.
1741        let mut cur = this.inner().weak.load(Relaxed);
1742
1743        loop {
1744            // check if the weak counter is currently "locked"; if so, spin.
1745            if cur == usize::MAX {
1746                hint::spin_loop();
1747                cur = this.inner().weak.load(Relaxed);
1748                continue;
1749            }
1750
1751            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1752            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1753
1754            // NOTE: this code currently ignores the possibility of overflow
1755            // into usize::MAX; in general both Rc and Arc need to be adjusted
1756            // to deal with overflow.
1757
1758            // Unlike with Clone(), we need this to be an Acquire read to
1759            // synchronize with the write coming from `is_unique`, so that the
1760            // events prior to that write happen before this read.
1761            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1762                Ok(_) => {
1763                    // Make sure we do not create a dangling Weak
1764                    debug_assert!(!is_dangling(this.ptr.as_ptr()));
1765                    return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1766                }
1767                Err(old) => cur = old,
1768            }
1769        }
1770    }
1771
1772    /// Gets the number of [`Weak`] pointers to this allocation.
1773    ///
1774    /// # Safety
1775    ///
1776    /// This method by itself is safe, but using it correctly requires extra care.
1777    /// Another thread can change the weak count at any time,
1778    /// including potentially between calling this method and acting on the result.
1779    ///
1780    /// # Examples
1781    ///
1782    /// ```
1783    /// use std::sync::Arc;
1784    ///
1785    /// let five = Arc::new(5);
1786    /// let _weak_five = Arc::downgrade(&five);
1787    ///
1788    /// // This assertion is deterministic because we haven't shared
1789    /// // the `Arc` or `Weak` between threads.
1790    /// assert_eq!(1, Arc::weak_count(&five));
1791    /// ```
1792    #[inline]
1793    #[must_use]
1794    #[stable(feature = "arc_counts", since = "1.15.0")]
1795    pub fn weak_count(this: &Self) -> usize {
1796        let cnt = this.inner().weak.load(Relaxed);
1797        // If the weak count is currently locked, the value of the
1798        // count was 0 just before taking the lock.
1799        if cnt == usize::MAX { 0 } else { cnt - 1 }
1800    }
1801
1802    /// Gets the number of strong (`Arc`) pointers to this allocation.
1803    ///
1804    /// # Safety
1805    ///
1806    /// This method by itself is safe, but using it correctly requires extra care.
1807    /// Another thread can change the strong count at any time,
1808    /// including potentially between calling this method and acting on the result.
1809    ///
1810    /// # Examples
1811    ///
1812    /// ```
1813    /// use std::sync::Arc;
1814    ///
1815    /// let five = Arc::new(5);
1816    /// let _also_five = Arc::clone(&five);
1817    ///
1818    /// // This assertion is deterministic because we haven't shared
1819    /// // the `Arc` between threads.
1820    /// assert_eq!(2, Arc::strong_count(&five));
1821    /// ```
1822    #[inline]
1823    #[must_use]
1824    #[stable(feature = "arc_counts", since = "1.15.0")]
1825    pub fn strong_count(this: &Self) -> usize {
1826        this.inner().strong.load(Relaxed)
1827    }
1828
1829    /// Increments the strong reference count on the `Arc<T>` associated with the
1830    /// provided pointer by one.
1831    ///
1832    /// # Safety
1833    ///
1834    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1835    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1836    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1837    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1838    /// allocated by `alloc`.
1839    ///
1840    /// [from_raw_in]: Arc::from_raw_in
1841    ///
1842    /// # Examples
1843    ///
1844    /// ```
1845    /// #![feature(allocator_api)]
1846    ///
1847    /// use std::sync::Arc;
1848    /// use std::alloc::System;
1849    ///
1850    /// let five = Arc::new_in(5, System);
1851    ///
1852    /// unsafe {
1853    ///     let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1854    ///     Arc::increment_strong_count_in(ptr, System);
1855    ///
1856    ///     // This assertion is deterministic because we haven't shared
1857    ///     // the `Arc` between threads.
1858    ///     let five = Arc::from_raw_in(ptr, System);
1859    ///     assert_eq!(2, Arc::strong_count(&five));
1860    /// #   // Prevent leaks for Miri.
1861    /// #   Arc::decrement_strong_count_in(ptr, System);
1862    /// }
1863    /// ```
1864    #[inline]
1865    #[unstable(feature = "allocator_api", issue = "32838")]
1866    pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1867    where
1868        A: Clone,
1869    {
1870        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
1871        let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
1872        // Now increase refcount, but don't drop new refcount either
1873        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1874    }
1875
1876    /// Decrements the strong reference count on the `Arc<T>` associated with the
1877    /// provided pointer by one.
1878    ///
1879    /// # Safety
1880    ///
1881    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1882    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1883    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1884    /// least 1) when invoking this method, and `ptr` must point to a block of memory
1885    /// allocated by `alloc`. This method can be used to release the final
1886    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1887    /// released.
1888    ///
1889    /// [from_raw_in]: Arc::from_raw_in
1890    ///
1891    /// # Examples
1892    ///
1893    /// ```
1894    /// #![feature(allocator_api)]
1895    ///
1896    /// use std::sync::Arc;
1897    /// use std::alloc::System;
1898    ///
1899    /// let five = Arc::new_in(5, System);
1900    ///
1901    /// unsafe {
1902    ///     let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1903    ///     Arc::increment_strong_count_in(ptr, System);
1904    ///
1905    ///     // These assertions are deterministic because we haven't shared
1906    ///     // the `Arc` between threads.
1907    ///     let five = Arc::from_raw_in(ptr, System);
1908    ///     assert_eq!(2, Arc::strong_count(&five));
1909    ///     Arc::decrement_strong_count_in(ptr, System);
1910    ///     assert_eq!(1, Arc::strong_count(&five));
1911    /// }
1912    /// ```
1913    #[inline]
1914    #[unstable(feature = "allocator_api", issue = "32838")]
1915    pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1916        unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1917    }
1918
1919    #[inline]
1920    fn inner(&self) -> &ArcInner<T> {
1921        // This unsafety is ok because while this arc is alive we're guaranteed
1922        // that the inner pointer is valid. Furthermore, we know that the
1923        // `ArcInner` structure itself is `Sync` because the inner data is
1924        // `Sync` as well, so we're ok loaning out an immutable pointer to these
1925        // contents.
1926        unsafe { self.ptr.as_ref() }
1927    }
1928
1929    // Non-inlined part of `drop`.
1930    #[inline(never)]
1931    unsafe fn drop_slow(&mut self) {
1932        // Drop the weak ref collectively held by all strong references when this
1933        // variable goes out of scope. This ensures that the memory is deallocated
1934        // even if the destructor of `T` panics.
1935        // Take a reference to `self.alloc` instead of cloning because 1. it'll last long
1936        // enough, and 2. you should be able to drop `Arc`s with unclonable allocators
1937        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
1938
1939        // Destroy the data at this time, even though we must not free the box
1940        // allocation itself (there might still be weak pointers lying around).
1941        // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
1942        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
1943    }
1944
1945    /// Returns `true` if the two `Arc`s point to the same allocation, in a manner similar to
1946    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
1947    ///
1948    /// # Examples
1949    ///
1950    /// ```
1951    /// use std::sync::Arc;
1952    ///
1953    /// let five = Arc::new(5);
1954    /// let same_five = Arc::clone(&five);
1955    /// let other_five = Arc::new(5);
1956    ///
1957    /// assert!(Arc::ptr_eq(&five, &same_five));
1958    /// assert!(!Arc::ptr_eq(&five, &other_five));
1959    /// ```
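    ///
    /// Because metadata is ignored, two `dyn Trait` pointers into the same
    /// allocation compare equal without their vtable pointers being
    /// inspected. A small sketch:
    ///
    /// ```
    /// use std::fmt::Debug;
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let a: Arc<dyn Debug> = five.clone();
    /// let b: Arc<dyn Debug> = five.clone();
    ///
    /// assert!(Arc::ptr_eq(&a, &b));
    /// ```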
1960    ///
1961    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1962    #[inline]
1963    #[must_use]
1964    #[stable(feature = "ptr_eq", since = "1.17.0")]
1965    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
1966        ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
1967    }
1968}
1969
1970impl<T: ?Sized> Arc<T> {
1971    /// Allocates an `ArcInner<T>` with sufficient space for
1972    /// a possibly-unsized inner value where the value has the layout provided.
1973    ///
1974    /// The function `mem_to_arcinner` is called with the raw (thin) pointer to the
1975    /// allocated memory and must return a (potentially fat) pointer to the `ArcInner<T>`.
1976    #[cfg(not(no_global_oom_handling))]
1977    unsafe fn allocate_for_layout(
1978        value_layout: Layout,
1979        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1980        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1981    ) -> *mut ArcInner<T> {
1982        let layout = arcinner_layout_for_value_layout(value_layout);
1983
1984        let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
1985
1986        unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
1987    }
1988
1989    /// Allocates an `ArcInner<T>` with sufficient space for
1990    /// a possibly-unsized inner value where the value has the layout provided,
1991    /// returning an error if allocation fails.
1992    ///
1993    /// The function `mem_to_arcinner` is called with the raw (thin) pointer to the
1994    /// allocated memory and must return a (potentially fat) pointer to the `ArcInner<T>`.
1995    unsafe fn try_allocate_for_layout(
1996        value_layout: Layout,
1997        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1998        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1999    ) -> Result<*mut ArcInner<T>, AllocError> {
2000        let layout = arcinner_layout_for_value_layout(value_layout);
2001
2002        let ptr = allocate(layout)?;
2003
2004        let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
2005
2006        Ok(inner)
2007    }
2008
2009    unsafe fn initialize_arcinner(
2010        ptr: NonNull<[u8]>,
2011        layout: Layout,
2012        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2013    ) -> *mut ArcInner<T> {
2014        let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
2015        debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
2016
2017        unsafe {
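            // The strong count starts at one for the `Arc` being created. The
            // weak count also starts at one: it stands for the implicit weak
            // reference held collectively by all strong references (dropped
            // last in `drop_slow`).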
2018            (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
2019            (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
2020        }
2021
2022        inner
2023    }
2024}
2025
2026impl<T: ?Sized, A: Allocator> Arc<T, A> {
2027    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
2028    #[inline]
2029    #[cfg(not(no_global_oom_handling))]
2030    unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
2031        // Allocate for the `ArcInner<T>` using the given value.
2032        unsafe {
2033            Arc::allocate_for_layout(
2034                Layout::for_value_raw(ptr),
2035                |layout| alloc.allocate(layout),
2036                |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
2037            )
2038        }
2039    }
2040
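    /// Moves the value out of `src` into a freshly allocated `ArcInner<T>` in
    /// the same allocator, then frees the box's allocation without dropping `T`.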
2041    #[cfg(not(no_global_oom_handling))]
2042    fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
2043        unsafe {
2044            let value_size = size_of_val(&*src);
2045            let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
2046
2047            // Copy value as bytes
2048            ptr::copy_nonoverlapping(
2049                (&raw const *src) as *const u8,
2050                (&raw mut (*ptr).data) as *mut u8,
2051                value_size,
2052            );
2053
2054            // Free the allocation without dropping its contents
2055            let (bptr, alloc) = Box::into_raw_with_allocator(src);
2056            let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
2057            drop(src);
2058
2059            Self::from_ptr_in(ptr, alloc)
2060        }
2061    }
2062}
2063
2064impl<T> Arc<[T]> {
2065    /// Allocates an `ArcInner<[T]>` with the given length.
2066    #[cfg(not(no_global_oom_handling))]
2067    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
2068        unsafe {
2069            Self::allocate_for_layout(
2070                Layout::array::<T>(len).unwrap(),
2071                |layout| Global.allocate(layout),
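                // Attach `len` as pointer metadata so the result is a fat
                // `*mut ArcInner<[T]>` describing `len` elements.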
2072                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2073            )
2074        }
2075    }
2076
2077    /// Copies elements from a slice into a newly allocated `Arc<[T]>`.
2078    ///
2079    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
2080    #[cfg(not(no_global_oom_handling))]
2081    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
2082        unsafe {
2083            let ptr = Self::allocate_for_slice(v.len());
2084
2085            ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());
2086
2087            Self::from_ptr(ptr)
2088        }
2089    }
2090
2091    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
2092    ///
2093    /// Behavior is undefined should the size be wrong.
2094    #[cfg(not(no_global_oom_handling))]
2095    unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
2096        // Panic guard while cloning T elements.
2097        // In the event of a panic, elements that have been written
2098        // into the new ArcInner will be dropped, then the memory freed.
2099        struct Guard<T> {
2100            mem: NonNull<u8>,
2101            elems: *mut T,
2102            layout: Layout,
2103            n_elems: usize,
2104        }
2105
2106        impl<T> Drop for Guard<T> {
2107            fn drop(&mut self) {
2108                unsafe {
2109                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
2110                    ptr::drop_in_place(slice);
2111
2112                    Global.deallocate(self.mem, self.layout);
2113                }
2114            }
2115        }
2116
2117        unsafe {
2118            let ptr = Self::allocate_for_slice(len);
2119
2120            let mem = ptr as *mut _ as *mut u8;
2121            let layout = Layout::for_value_raw(ptr);
2122
2123            // Pointer to first element
2124            let elems = (&raw mut (*ptr).data) as *mut T;
2125
2126            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2127
2128            for (i, item) in iter.enumerate() {
2129                ptr::write(elems.add(i), item);
2130                guard.n_elems += 1;
2131            }
2132
2133            // All clear. Forget the guard so it doesn't free the new ArcInner.
2134            mem::forget(guard);
2135
2136            Self::from_ptr(ptr)
2137        }
2138    }
2139}
2140
2141impl<T, A: Allocator> Arc<[T], A> {
2142    /// Allocates an `ArcInner<[T]>` with the given length.
2143    #[inline]
2144    #[cfg(not(no_global_oom_handling))]
2145    unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2146        unsafe {
2147            Arc::allocate_for_layout(
2148                Layout::array::<T>(len).unwrap(),
2149                |layout| alloc.allocate(layout),
2150                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2151            )
2152        }
2153    }
2154}
2155
2156/// Specialization trait used for `From<&[T]>`.
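///
/// The `default` impl below clones element by element via `from_iter_exact`;
/// the `T: Copy` impl specializes this to a bitwise `copy_from_slice`.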
2157#[cfg(not(no_global_oom_handling))]
2158trait ArcFromSlice<T> {
2159    fn from_slice(slice: &[T]) -> Self;
2160}
2161
2162#[cfg(not(no_global_oom_handling))]
2163impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2164    #[inline]
2165    default fn from_slice(v: &[T]) -> Self {
2166        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2167    }
2168}
2169
2170#[cfg(not(no_global_oom_handling))]
2171impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
2172    #[inline]
2173    fn from_slice(v: &[T]) -> Self {
2174        unsafe { Arc::copy_from_slice(v) }
2175    }
2176}
2177
2178#[stable(feature = "rust1", since = "1.0.0")]
2179impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2180    /// Makes a clone of the `Arc` pointer.
2181    ///
2182    /// This creates another pointer to the same allocation, increasing the
2183    /// strong reference count.
2184    ///
2185    /// # Examples
2186    ///
2187    /// ```
2188    /// use std::sync::Arc;
2189    ///
2190    /// let five = Arc::new(5);
2191    ///
2192    /// let _ = Arc::clone(&five);
2193    /// ```
2194    #[inline]
2195    fn clone(&self) -> Arc<T, A> {
2196        // Using a relaxed ordering is alright here, as knowledge of the
2197        // original reference prevents other threads from erroneously deleting
2198        // the object.
2199        //
2200        // As explained in the [Boost documentation][1], Increasing the
2201        // reference counter can always be done with memory_order_relaxed: New
2202        // references to an object can only be formed from an existing
2203        // reference, and passing an existing reference from one thread to
2204        // another must already provide any required synchronization.
2205        //
2206        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2207        let old_size = self.inner().strong.fetch_add(1, Relaxed);
2208
2209        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
2210        // Arcs. If we don't do this, the count can overflow and users will use-after-free. This
2211        // branch will never be taken in any realistic program. We abort because such a program is
2212        // incredibly degenerate, and we don't care to support it.
2213        //
2214        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2215        // But we do that check *after* having done the increment, so there is a chance here that
2216        // the worst already happened and we actually do overflow the `usize` counter. However, that
2217        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2218        // above and the `abort` below, which seems exceedingly unlikely.
2219        //
2220        // This is a global invariant, and also applies when using a compare-exchange loop to increment
2221        // counters in other methods.
2222        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2223        // and then overflow using a few `fetch_add`s.
2224        if old_size > MAX_REFCOUNT {
2225            abort();
2226        }
2227
2228        unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2229    }
2230}
2231
2232#[unstable(feature = "ergonomic_clones", issue = "132290")]
2233impl<T: ?Sized, A: Allocator + Clone> UseCloned for Arc<T, A> {}
2234
2235#[stable(feature = "rust1", since = "1.0.0")]
2236impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2237    type Target = T;
2238
2239    #[inline]
2240    fn deref(&self) -> &T {
2241        &self.inner().data
2242    }
2243}
2244
2245#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2246unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Arc<T, A> {}
2247
2248#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2249unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Weak<T, A> {}
2250
2251#[unstable(feature = "deref_pure_trait", issue = "87121")]
2252unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2253
2254#[unstable(feature = "legacy_receiver_trait", issue = "none")]
2255impl<T: ?Sized> LegacyReceiver for Arc<T> {}
2256
2257#[cfg(not(no_global_oom_handling))]
2258impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
2259    /// Makes a mutable reference into the given `Arc`.
2260    ///
2261    /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2262    /// [`clone`] the inner value to a new allocation to ensure unique ownership.  This is also
2263    /// referred to as clone-on-write.
2264    ///
2265    /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2266    /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2267    /// be cloned.
2268    ///
2269    /// See also [`get_mut`], which will fail rather than cloning the inner value
2270    /// or dissociating [`Weak`] pointers.
2271    ///
2272    /// [`clone`]: Clone::clone
2273    /// [`get_mut`]: Arc::get_mut
2274    ///
2275    /// # Examples
2276    ///
2277    /// ```
2278    /// use std::sync::Arc;
2279    ///
2280    /// let mut data = Arc::new(5);
2281    ///
2282    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2283    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2284    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
2285    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2286    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
2287    ///
2288    /// // Now `data` and `other_data` point to different allocations.
2289    /// assert_eq!(*data, 8);
2290    /// assert_eq!(*other_data, 12);
2291    /// ```
2292    ///
2293    /// [`Weak`] pointers will be dissociated:
2294    ///
2295    /// ```
2296    /// use std::sync::Arc;
2297    ///
2298    /// let mut data = Arc::new(75);
2299    /// let weak = Arc::downgrade(&data);
2300    ///
2301    /// assert!(75 == *data);
2302    /// assert!(75 == *weak.upgrade().unwrap());
2303    ///
2304    /// *Arc::make_mut(&mut data) += 1;
2305    ///
2306    /// assert!(76 == *data);
2307    /// assert!(weak.upgrade().is_none());
2308    /// ```
2309    #[inline]
2310    #[stable(feature = "arc_unique", since = "1.4.0")]
2311    pub fn make_mut(this: &mut Self) -> &mut T {
2312        let size_of_val = size_of_val::<T>(&**this);
2313
2314        // Note that we hold both a strong reference and a weak reference.
2315        // Thus, releasing our strong reference only will not, by itself, cause
2316        // the memory to be deallocated.
2317        //
2318        // Use Acquire to ensure that we see any writes to `weak` that happen
2319        // before release writes (i.e., decrements) to `strong`. Since we hold a
2320        // weak count, there's no chance the ArcInner itself could be
2321        // deallocated.
2322        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2323            // Another strong pointer exists, so we must clone.
2324
2325            let this_data_ref: &T = &**this;
2326            // `in_progress` drops the allocation if we panic before finishing initializing it.
2327            let mut in_progress: UniqueArcUninit<T, A> =
2328                UniqueArcUninit::new(this_data_ref, this.alloc.clone());
2329
2330            let initialized_clone = unsafe {
2331                // Clone. If the clone panics, `in_progress` will be dropped and clean up.
2332                this_data_ref.clone_to_uninit(in_progress.data_ptr().cast());
2333                // Cast type of pointer, now that it is initialized.
2334                in_progress.into_arc()
2335            };
2336            *this = initialized_clone;
2337        } else if this.inner().weak.load(Relaxed) != 1 {
2338            // Relaxed suffices in the above because this is fundamentally an
2339            // optimization: we are always racing with weak pointers being
2340            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2341
2342            // We removed the last strong ref, but there are additional weak
2343            // refs remaining. We'll move the contents to a new Arc, and
2344            // invalidate the other weak refs.
2345
2346            // Note that it is not possible for the read of `weak` to yield
2347            // usize::MAX (i.e., locked), since the weak count can only be
2348            // locked by a thread with a strong reference.
2349
2350            // Materialize our own implicit weak pointer, so that it can clean
2351            // up the ArcInner as needed.
2352            let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2353
2354            // Can just steal the data, all that's left is Weaks
2355            //
2356            // We don't need panic-protection like the above branch does, but we might as well
2357            // use the same mechanism.
2358            let mut in_progress: UniqueArcUninit<T, A> =
2359                UniqueArcUninit::new(&**this, this.alloc.clone());
2360            unsafe {
2361                // Initialize `in_progress` with a move of `**this`.
2362                // We have to express this in terms of bytes because `T: ?Sized`; there is no
2363                // operation that just copies a value based on its `size_of_val()`.
2364                ptr::copy_nonoverlapping(
2365                    ptr::from_ref(&**this).cast::<u8>(),
2366                    in_progress.data_ptr().cast::<u8>(),
2367                    size_of_val,
2368                );
2369
2370                ptr::write(this, in_progress.into_arc());
2371            }
2372        } else {
2373            // We were the sole reference of either kind; bump back up the
2374            // strong ref count.
2375            this.inner().strong.store(1, Release);
2376        }
2377
2378        // As with `get_mut()`, the unsafety is ok because our reference was
2379        // either unique to begin with, or became one upon cloning the contents.
2380        unsafe { Self::get_mut_unchecked(this) }
2381    }
2382}
2383
2384impl<T: Clone, A: Allocator> Arc<T, A> {
2385    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2386    /// clone.
2387    ///
2388    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2389    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2390    ///
2391    /// # Examples
2392    ///
2393    /// ```
2394    /// # use std::{ptr, sync::Arc};
2395    /// let inner = String::from("test");
2396    /// let ptr = inner.as_ptr();
2397    ///
2398    /// let arc = Arc::new(inner);
2399    /// let inner = Arc::unwrap_or_clone(arc);
2400    /// // The inner value was not cloned
2401    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2402    ///
2403    /// let arc = Arc::new(inner);
2404    /// let arc2 = arc.clone();
2405    /// let inner = Arc::unwrap_or_clone(arc);
2406    /// // Because there were 2 references, we had to clone the inner value.
2407    /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2408    /// // `arc2` is the last reference, so when we unwrap it we get back
2409    /// // the original `String`.
2410    /// let inner = Arc::unwrap_or_clone(arc2);
2411    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2412    /// ```
2413    #[inline]
2414    #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2415    pub fn unwrap_or_clone(this: Self) -> T {
2416        Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2417    }
2418}
2419
2420impl<T: ?Sized, A: Allocator> Arc<T, A> {
2421    /// Returns a mutable reference into the given `Arc`, if there are
2422    /// no other `Arc` or [`Weak`] pointers to the same allocation.
2423    ///
2424    /// Returns [`None`] otherwise, because it is not safe to
2425    /// mutate a shared value.
2426    ///
2427    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2428    /// the inner value when there are other `Arc` pointers.
2429    ///
2430    /// [make_mut]: Arc::make_mut
2431    /// [clone]: Clone::clone
2432    ///
2433    /// # Examples
2434    ///
2435    /// ```
2436    /// use std::sync::Arc;
2437    ///
2438    /// let mut x = Arc::new(3);
2439    /// *Arc::get_mut(&mut x).unwrap() = 4;
2440    /// assert_eq!(*x, 4);
2441    ///
2442    /// let _y = Arc::clone(&x);
2443    /// assert!(Arc::get_mut(&mut x).is_none());
2444    /// ```
2445    #[inline]
2446    #[stable(feature = "arc_unique", since = "1.4.0")]
2447    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2448        if Self::is_unique(this) {
2449            // This unsafety is ok because we're guaranteed that the pointer
2450            // returned is the *only* pointer that will ever be returned to T. Our
2451            // reference count is guaranteed to be 1 at this point, and we required
2452            // the Arc itself to be `mut`, so we're returning the only possible
2453            // reference to the inner data.
2454            unsafe { Some(Arc::get_mut_unchecked(this)) }
2455        } else {
2456            None
2457        }
2458    }
2459
2460    /// Returns a mutable reference into the given `Arc`,
2461    /// without any check.
2462    ///
2463    /// See also [`get_mut`], which is safe and does appropriate checks.
2464    ///
2465    /// [`get_mut`]: Arc::get_mut
2466    ///
2467    /// # Safety
2468    ///
2469    /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2470    /// they must not be dereferenced or have active borrows for the duration
2471    /// of the returned borrow, and their inner type must be exactly the same as the
2472    /// inner type of this `Arc` (including lifetimes). This is trivially the case if no
2473    /// such pointers exist, for example immediately after `Arc::new`.
2474    ///
2475    /// # Examples
2476    ///
2477    /// ```
2478    /// #![feature(get_mut_unchecked)]
2479    ///
2480    /// use std::sync::Arc;
2481    ///
2482    /// let mut x = Arc::new(String::new());
2483    /// unsafe {
2484    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
2485    /// }
2486    /// assert_eq!(*x, "foo");
2487    /// ```
2488    /// Other `Arc` pointers to the same allocation must be to the same type.
2489    /// ```no_run
2490    /// #![feature(get_mut_unchecked)]
2491    ///
2492    /// use std::sync::Arc;
2493    ///
2494    /// let x: Arc<str> = Arc::from("Hello, world!");
2495    /// let mut y: Arc<[u8]> = x.clone().into();
2496    /// unsafe {
2497    ///     // this is Undefined Behavior, because x's inner type is str, not [u8]
2498    ///     Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2499    /// }
2500    /// println!("{}", &*x); // Invalid UTF-8 in a str
2501    /// ```
2502    /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2503    /// ```no_run
2504    /// #![feature(get_mut_unchecked)]
2505    ///
2506    /// use std::sync::Arc;
2507    ///
2508    /// let x: Arc<&str> = Arc::new("Hello, world!");
2509    /// {
2510    ///     let s = String::from("Oh, no!");
2511    ///     let mut y: Arc<&str> = x.clone();
2512    ///     unsafe {
2513    ///         // this is Undefined Behavior, because x's inner type
2514    ///         // is &'long str, not &'short str
2515    ///         *Arc::get_mut_unchecked(&mut y) = &s;
2516    ///     }
2517    /// }
2518    /// println!("{}", &*x); // Use-after-free
2519    /// ```
2520    #[inline]
2521    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2522    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2523        // We are careful to *not* create a reference covering the "count" fields, as
2524        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2525        unsafe { &mut (*this.ptr.as_ptr()).data }
2526    }
2527
2528    /// Determines whether this is the unique reference to the underlying data.
2529    ///
2530    /// Returns `true` if there are no other `Arc` or [`Weak`] pointers to the same allocation;
2531    /// returns `false` otherwise.
2532    ///
2533    /// If this function returns `true`, then it is guaranteed to be safe to call [`get_mut_unchecked`]
2534    /// on this `Arc`, so long as no clones occur in between.
2535    ///
2536    /// # Examples
2537    ///
2538    /// ```
2539    /// #![feature(arc_is_unique)]
2540    ///
2541    /// use std::sync::Arc;
2542    ///
2543    /// let x = Arc::new(3);
2544    /// assert!(Arc::is_unique(&x));
2545    ///
2546    /// let y = Arc::clone(&x);
2547    /// assert!(!Arc::is_unique(&x));
2548    /// drop(y);
2549    ///
2550    /// // Weak references also count, because they could be upgraded at any time.
2551    /// let z = Arc::downgrade(&x);
2552    /// assert!(!Arc::is_unique(&x));
2553    /// ```
2554    ///
2555    /// # Pointer invalidation
2556    ///
2557    /// This function will always return the same value as `Arc::get_mut(arc).is_some()`. However,
2558    /// unlike that operation it does not produce any mutable references to the underlying data,
2559    /// meaning no pointers to the data inside the `Arc` are invalidated by the call. Thus, the
2560    /// following code is valid, even though it would be UB if it used `Arc::get_mut`:
2561    ///
2562    /// ```
2563    /// #![feature(arc_is_unique)]
2564    ///
2565    /// use std::sync::Arc;
2566    ///
2567    /// let arc = Arc::new(5);
2568    /// let pointer: *const i32 = &*arc;
2569    /// assert!(Arc::is_unique(&arc));
2570    /// assert_eq!(unsafe { *pointer }, 5);
2571    /// ```
2572    ///
2573    /// # Atomic orderings
2574    ///
2575    /// Concurrent drops of other `Arc` pointers to the same allocation will synchronize with this
2576    /// call - that is, this call performs an `Acquire` operation on the underlying strong and weak
2577    /// ref counts. This ensures that calling `get_mut_unchecked` is safe.
2578    ///
2579    /// Note that this operation requires locking the weak ref count, so concurrent calls to
2580    /// `downgrade` may spin-loop for a short period of time.
2581    ///
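    /// A sketch of that guarantee (both features here are unstable):
    ///
    /// ```
    /// #![feature(arc_is_unique, get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// if Arc::is_unique(&x) {
    ///     // SAFETY: `is_unique` returned `true` and no clone is created
    ///     // before this borrow ends.
    ///     unsafe { *Arc::get_mut_unchecked(&mut x) = 4 };
    /// }
    /// assert_eq!(*x, 4);
    /// ```
    ///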
2582    /// [`get_mut_unchecked`]: Self::get_mut_unchecked
2583    #[inline]
2584    #[unstable(feature = "arc_is_unique", issue = "138938")]
2585    pub fn is_unique(this: &Self) -> bool {
2586        // lock the weak pointer count if we appear to be the sole weak pointer
2587        // holder.
2588        //
2589        // The acquire label here ensures a happens-before relationship with any
2590        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2591        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2592        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2593        if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2594            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2595            // counter in `drop` -- the only access that happens when any but the last reference
2596            // is being dropped.
2597            let unique = this.inner().strong.load(Acquire) == 1;
2598
2599            // The release write here synchronizes with a read in `downgrade`,
2600            // effectively preventing the above read of `strong` from happening
2601            // after the write.
2602            this.inner().weak.store(1, Release); // release the lock
2603            unique
2604        } else {
2605            false
2606        }
2607    }
2608}
2609
2610#[stable(feature = "rust1", since = "1.0.0")]
2611unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2612    /// Drops the `Arc`.
2613    ///
2614    /// This will decrement the strong reference count. If the strong reference
2615    /// count reaches zero then the only other references (if any) are
2616    /// [`Weak`], so we `drop` the inner value.
2617    ///
2618    /// # Examples
2619    ///
2620    /// ```
2621    /// use std::sync::Arc;
2622    ///
2623    /// struct Foo;
2624    ///
2625    /// impl Drop for Foo {
2626    ///     fn drop(&mut self) {
2627    ///         println!("dropped!");
2628    ///     }
2629    /// }
2630    ///
2631    /// let foo  = Arc::new(Foo);
2632    /// let foo2 = Arc::clone(&foo);
2633    ///
2634    /// drop(foo);    // Doesn't print anything
2635    /// drop(foo2);   // Prints "dropped!"
2636    /// ```
2637    #[inline]
2638    fn drop(&mut self) {
2639        // Because `fetch_sub` is already atomic, we do not need to synchronize
2640        // with other threads unless we are going to delete the object. This
2641        // same logic applies to the below `fetch_sub` to the `weak` count.
2642        if self.inner().strong.fetch_sub(1, Release) != 1 {
2643            return;
2644        }
2645
2646        // This fence is needed to prevent reordering of use of the data and
2647        // deletion of the data. Because it is marked `Release`, the decreasing
2648        // of the reference count synchronizes with this `Acquire` fence. This
2649        // means that use of the data happens before decreasing the reference
2650        // count, which happens before this fence, which happens before the
2651        // deletion of the data.
2652        //
2653        // As explained in the [Boost documentation][1],
2654        //
2655        // > It is important to enforce any possible access to the object in one
2656        // > thread (through an existing reference) to *happen before* deleting
2657        // > the object in a different thread. This is achieved by a "release"
2658        // > operation after dropping a reference (any access to the object
2659        // > through this reference must obviously happened before), and an
2660        // > "acquire" operation before deleting the object.
2661        //
2662        // In particular, while the contents of an Arc are usually immutable, it's
2663        // possible to have interior writes to something like a Mutex<T>. Since a
2664        // Mutex is not acquired when it is deleted, we can't rely on its
2665        // synchronization logic to make writes in thread A visible to a destructor
2666        // running in thread B.
2667        //
2668        // Also note that the Acquire fence here could probably be replaced with an
2669        // Acquire load, which could improve performance in highly-contended
2670        // situations. See [2].
2671        //
2672        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2673        // [2]: (https://github.com/rust-lang/rust/pull/41714)
2674        acquire!(self.inner().strong);
2675
2676        // Make sure we aren't trying to "drop" the shared static for empty slices
2677        // used by Default::default.
2678        debug_assert!(
2679            !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
2680            "Arcs backed by a static should never reach a strong count of 0. \
2681            Likely decrement_strong_count or from_raw were called too many times.",
2682        );
2683
2684        unsafe {
2685            self.drop_slow();
2686        }
2687    }
2688}
2689
2690impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2691    /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2692    ///
2693    /// # Examples
2694    ///
2695    /// ```
2696    /// use std::any::Any;
2697    /// use std::sync::Arc;
2698    ///
2699    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2700    ///     if let Ok(string) = value.downcast::<String>() {
2701    ///         println!("String ({}): {}", string.len(), string);
2702    ///     }
2703    /// }
2704    ///
2705    /// let my_string = "Hello World".to_string();
2706    /// print_if_string(Arc::new(my_string));
2707    /// print_if_string(Arc::new(0i8));
2708    /// ```
2709    #[inline]
2710    #[stable(feature = "rc_downcast", since = "1.29.0")]
2711    pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2712    where
2713        T: Any + Send + Sync,
2714    {
2715        if (*self).is::<T>() {
2716            unsafe {
2717                let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2718                Ok(Arc::from_inner_in(ptr.cast(), alloc))
2719            }
2720        } else {
2721            Err(self)
2722        }
2723    }
2724
2725    /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2726    ///
2727    /// For a safe alternative see [`downcast`].
2728    ///
2729    /// # Examples
2730    ///
2731    /// ```
2732    /// #![feature(downcast_unchecked)]
2733    ///
2734    /// use std::any::Any;
2735    /// use std::sync::Arc;
2736    ///
2737    /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2738    ///
2739    /// unsafe {
2740    ///     assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2741    /// }
2742    /// ```
2743    ///
2744    /// # Safety
2745    ///
2746    /// The contained value must be of type `T`. Calling this method
2747    /// with the incorrect type is *undefined behavior*.
2748    ///
2750    /// [`downcast`]: Self::downcast
2751    #[inline]
2752    #[unstable(feature = "downcast_unchecked", issue = "90850")]
2753    pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2754    where
2755        T: Any + Send + Sync,
2756    {
2757        unsafe {
2758            let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2759            Arc::from_inner_in(ptr.cast(), alloc)
2760        }
2761    }
2762}
2763
2764impl<T> Weak<T> {
2765    /// Constructs a new `Weak<T>`, without allocating any memory.
2766    /// Calling [`upgrade`] on the return value always gives [`None`].
2767    ///
2768    /// [`upgrade`]: Weak::upgrade
2769    ///
2770    /// # Examples
2771    ///
2772    /// ```
2773    /// use std::sync::Weak;
2774    ///
2775    /// let empty: Weak<i64> = Weak::new();
2776    /// assert!(empty.upgrade().is_none());
2777    /// ```
2778    #[inline]
2779    #[stable(feature = "downgraded_weak", since = "1.10.0")]
2780    #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2781    #[must_use]
2782    pub const fn new() -> Weak<T> {
2783        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc: Global }
2784    }
2785}
2786
2787impl<T, A: Allocator> Weak<T, A> {
2788    /// Constructs a new `Weak<T, A>`, without allocating any memory, technically in the provided
2789    /// allocator.
2790    /// Calling [`upgrade`] on the return value always gives [`None`].
2791    ///
2792    /// [`upgrade`]: Weak::upgrade
2793    ///
2794    /// # Examples
2795    ///
2796    /// ```
2797    /// #![feature(allocator_api)]
2798    ///
2799    /// use std::sync::Weak;
2800    /// use std::alloc::System;
2801    ///
2802    /// let empty: Weak<i64, _> = Weak::new_in(System);
2803    /// assert!(empty.upgrade().is_none());
2804    /// ```
2805    #[inline]
2806    #[unstable(feature = "allocator_api", issue = "32838")]
2807    pub fn new_in(alloc: A) -> Weak<T, A> {
2808        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc }
2809    }
2810}
2811
2812/// Helper type to allow accessing the reference counts without
2813/// making any assertions about the data field.
2814struct WeakInner<'a> {
2815    weak: &'a Atomic<usize>,
2816    strong: &'a Atomic<usize>,
2817}
2818
2819impl<T: ?Sized> Weak<T> {
2820    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2821    ///
2822    /// This can be used to safely get a strong reference (by calling [`upgrade`]
2823    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2824    ///
2825    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2826    /// as these don't own anything; the method still works on them).
2827    ///
2828    /// # Safety
2829    ///
2830    /// The pointer must have originated from [`into_raw`] and must still own its potential
2831    /// weak reference, and must point to a block of memory allocated by the global allocator.
2832    ///
2833    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2834    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2835    /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
2838    ///
2839    /// ```
2840    /// use std::sync::{Arc, Weak};
2841    ///
2842    /// let strong = Arc::new("hello".to_owned());
2843    ///
2844    /// let raw_1 = Arc::downgrade(&strong).into_raw();
2845    /// let raw_2 = Arc::downgrade(&strong).into_raw();
2846    ///
2847    /// assert_eq!(2, Arc::weak_count(&strong));
2848    ///
2849    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2850    /// assert_eq!(1, Arc::weak_count(&strong));
2851    ///
2852    /// drop(strong);
2853    ///
2854    /// // Decrement the last weak count.
2855    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2856    /// ```
2857    ///
2858    /// [`new`]: Weak::new
2859    /// [`into_raw`]: Weak::into_raw
2860    /// [`upgrade`]: Weak::upgrade
2861    #[inline]
2862    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2863    pub unsafe fn from_raw(ptr: *const T) -> Self {
2864        unsafe { Weak::from_raw_in(ptr, Global) }
2865    }
2866
2867    /// Consumes the `Weak<T>` and turns it into a raw pointer.
2868    ///
2869    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2870    /// one weak reference (the weak count is not modified by this operation). It can be turned
2871    /// back into the `Weak<T>` with [`from_raw`].
2872    ///
    /// The same restrictions on accessing the target of the pointer as with
    /// [`as_ptr`] apply.
2875    ///
2876    /// # Examples
2877    ///
2878    /// ```
2879    /// use std::sync::{Arc, Weak};
2880    ///
2881    /// let strong = Arc::new("hello".to_owned());
2882    /// let weak = Arc::downgrade(&strong);
2883    /// let raw = weak.into_raw();
2884    ///
2885    /// assert_eq!(1, Arc::weak_count(&strong));
2886    /// assert_eq!("hello", unsafe { &*raw });
2887    ///
2888    /// drop(unsafe { Weak::from_raw(raw) });
2889    /// assert_eq!(0, Arc::weak_count(&strong));
2890    /// ```
2891    ///
2892    /// [`from_raw`]: Weak::from_raw
2893    /// [`as_ptr`]: Weak::as_ptr
2894    #[must_use = "losing the pointer will leak memory"]
2895    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2896    pub fn into_raw(self) -> *const T {
2897        ManuallyDrop::new(self).as_ptr()
2898    }
2899}
2900
2901impl<T: ?Sized, A: Allocator> Weak<T, A> {
2902    /// Returns a reference to the underlying allocator.
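    ///
    /// # Examples
    ///
    /// A minimal sketch of reading the allocator back out of a `Weak`:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Weak;
    /// use std::alloc::System;
    ///
    /// let weak: Weak<u32, System> = Weak::new_in(System);
    /// let _alloc: &System = weak.allocator();
    /// ```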
2903    #[inline]
2904    #[unstable(feature = "allocator_api", issue = "32838")]
2905    pub fn allocator(&self) -> &A {
2906        &self.alloc
2907    }
2908
2909    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2910    ///
2911    /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2912    /// unaligned or even [`null`] otherwise.
2913    ///
2914    /// # Examples
2915    ///
2916    /// ```
2917    /// use std::sync::Arc;
2918    /// use std::ptr;
2919    ///
2920    /// let strong = Arc::new("hello".to_owned());
2921    /// let weak = Arc::downgrade(&strong);
2922    /// // Both point to the same object
2923    /// assert!(ptr::eq(&*strong, weak.as_ptr()));
2924    /// // The strong here keeps it alive, so we can still access the object.
2925    /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
2926    ///
2927    /// drop(strong);
2928    /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
2929    /// // undefined behavior.
2930    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
2931    /// ```
2932    ///
2933    /// [`null`]: core::ptr::null "ptr::null"
2934    #[must_use]
2935    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2936    pub fn as_ptr(&self) -> *const T {
2937        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
2938
2939        if is_dangling(ptr) {
2940            // If the pointer is dangling, we return the sentinel directly. This cannot be
2941            // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
2942            ptr as *const T
2943        } else {
2944            // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
2945            // The payload may be dropped at this point, and we have to maintain provenance,
2946            // so use raw pointer manipulation.
2947            unsafe { &raw mut (*ptr).data }
2948        }
2949    }
2950
2951    /// Consumes the `Weak<T>`, returning the wrapped pointer and allocator.
2952    ///
2953    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2954    /// one weak reference (the weak count is not modified by this operation). It can be turned
2955    /// back into the `Weak<T>` with [`from_raw_in`].
2956    ///
    /// The same restrictions on accessing the target of the pointer as with
    /// [`as_ptr`] apply.
2959    ///
2960    /// # Examples
2961    ///
2962    /// ```
2963    /// #![feature(allocator_api)]
2964    /// use std::sync::{Arc, Weak};
2965    /// use std::alloc::System;
2966    ///
2967    /// let strong = Arc::new_in("hello".to_owned(), System);
2968    /// let weak = Arc::downgrade(&strong);
2969    /// let (raw, alloc) = weak.into_raw_with_allocator();
2970    ///
2971    /// assert_eq!(1, Arc::weak_count(&strong));
2972    /// assert_eq!("hello", unsafe { &*raw });
2973    ///
2974    /// drop(unsafe { Weak::from_raw_in(raw, alloc) });
2975    /// assert_eq!(0, Arc::weak_count(&strong));
2976    /// ```
2977    ///
2978    /// [`from_raw_in`]: Weak::from_raw_in
2979    /// [`as_ptr`]: Weak::as_ptr
2980    #[must_use = "losing the pointer will leak memory"]
2981    #[unstable(feature = "allocator_api", issue = "32838")]
2982    pub fn into_raw_with_allocator(self) -> (*const T, A) {
2983        let this = mem::ManuallyDrop::new(self);
2984        let result = this.as_ptr();
2985        // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
2986        let alloc = unsafe { ptr::read(&this.alloc) };
2987        (result, alloc)
2988    }
2989
2990    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
2991    /// allocator.
2992    ///
2993    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to release the owned weak reference by dropping the `Weak<T>`.
2995    ///
2996    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2997    /// as these don't own anything; the method still works on them).
2998    ///
2999    /// # Safety
3000    ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
3002    /// weak reference, and must point to a block of memory allocated by `alloc`.
3003    ///
3004    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
3005    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
3006    /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
3009    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::{Arc, Weak};
    /// use std::alloc::System;
    ///
    /// let strong = Arc::new_in("hello".to_owned(), System);
    ///
    /// let (raw_1, alloc_1) = Arc::downgrade(&strong).into_raw_with_allocator();
    /// let (raw_2, alloc_2) = Arc::downgrade(&strong).into_raw_with_allocator();
    ///
    /// assert_eq!(2, Arc::weak_count(&strong));
    ///
    /// assert_eq!("hello", &*unsafe { Weak::from_raw_in(raw_1, alloc_1) }.upgrade().unwrap());
    /// assert_eq!(1, Arc::weak_count(&strong));
    ///
    /// drop(strong);
    ///
    /// // Decrement the last weak count.
    /// assert!(unsafe { Weak::from_raw_in(raw_2, alloc_2) }.upgrade().is_none());
3027    /// ```
3028    ///
3029    /// [`new`]: Weak::new
3030    /// [`into_raw`]: Weak::into_raw
3031    /// [`upgrade`]: Weak::upgrade
3032    #[inline]
3033    #[unstable(feature = "allocator_api", issue = "32838")]
3034    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
3035        // See Weak::as_ptr for context on how the input pointer is derived.
3036
3037        let ptr = if is_dangling(ptr) {
3038            // This is a dangling Weak.
3039            ptr as *mut ArcInner<T>
3040        } else {
3041            // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
3042            // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
3043            let offset = unsafe { data_offset(ptr) };
            // Thus, we reverse the offset to get the whole ArcInner.
3045            // SAFETY: the pointer originated from a Weak, so this offset is safe.
3046            unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
3047        };
3048
3049        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
3050        Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
3051    }
3052}
3053
3054impl<T: ?Sized, A: Allocator> Weak<T, A> {
3055    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
3056    /// dropping of the inner value if successful.
3057    ///
3058    /// Returns [`None`] if the inner value has since been dropped.
3059    ///
3060    /// # Examples
3061    ///
3062    /// ```
3063    /// use std::sync::Arc;
3064    ///
3065    /// let five = Arc::new(5);
3066    ///
3067    /// let weak_five = Arc::downgrade(&five);
3068    ///
3069    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
3070    /// assert!(strong_five.is_some());
3071    ///
3072    /// // Destroy all strong pointers.
3073    /// drop(strong_five);
3074    /// drop(five);
3075    ///
3076    /// assert!(weak_five.upgrade().is_none());
3077    /// ```
3078    #[must_use = "this returns a new `Arc`, \
3079                  without modifying the original weak pointer"]
3080    #[stable(feature = "arc_weak", since = "1.4.0")]
3081    pub fn upgrade(&self) -> Option<Arc<T, A>>
3082    where
3083        A: Clone,
3084    {
3085        #[inline]
3086        fn checked_increment(n: usize) -> Option<usize> {
            // Any write of 0 we can observe leaves the field in a permanently zero state.
3088            if n == 0 {
3089                return None;
3090            }
3091            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
3092            assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
3093            Some(n + 1)
3094        }
3095
3096        // We use a CAS loop to increment the strong count instead of a
3097        // fetch_add as this function should never take the reference count
3098        // from zero to one.
3099        //
3100        // Relaxed is fine for the failure case because we don't have any expectations about the new state.
3101        // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
3102        // value can be initialized after `Weak` references have already been created. In that case, we
3103        // expect to observe the fully initialized value.
3104        if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
            // SAFETY: the strong count was nonzero and has been incremented, as verified in checked_increment.
3106            unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
3107        } else {
3108            None
3109        }
3110    }
3111
3112    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
3113    ///
3114    /// If `self` was created using [`Weak::new`], this will return 0.
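    ///
    /// # Examples
    ///
    /// A short demonstration of the count following the `Arc` lifetimes:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// let also_five = Arc::clone(&five);
    /// assert_eq!(2, weak_five.strong_count());
    ///
    /// drop(five);
    /// drop(also_five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```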
3115    #[must_use]
3116    #[stable(feature = "weak_counts", since = "1.41.0")]
3117    pub fn strong_count(&self) -> usize {
3118        if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
3119    }
3120
3121    /// Gets an approximation of the number of `Weak` pointers pointing to this
3122    /// allocation.
3123    ///
3124    /// If `self` was created using [`Weak::new`], or if there are no remaining
3125    /// strong pointers, this will return 0.
3126    ///
3127    /// # Accuracy
3128    ///
3129    /// Due to implementation details, the returned value can be off by 1 in
3130    /// either direction when other threads are manipulating any `Arc`s or
3131    /// `Weak`s pointing to the same allocation.
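    ///
    /// # Examples
    ///
    /// A short demonstration; the implicit weak reference held collectively by the
    /// strong pointers is not included in the count:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// // Once the last strong pointer is gone, the count reads 0 even though
    /// // `weak_five` itself still exists.
    /// drop(five);
    /// assert_eq!(0, weak_five.weak_count());
    /// ```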
3132    #[must_use]
3133    #[stable(feature = "weak_counts", since = "1.41.0")]
3134    pub fn weak_count(&self) -> usize {
3135        if let Some(inner) = self.inner() {
3136            let weak = inner.weak.load(Acquire);
3137            let strong = inner.strong.load(Relaxed);
3138            if strong == 0 {
3139                0
3140            } else {
3141                // Since we observed that there was at least one strong pointer
3142                // after reading the weak count, we know that the implicit weak
3143                // reference (present whenever any strong references are alive)
3144                // was still around when we observed the weak count, and can
3145                // therefore safely subtract it.
3146                weak - 1
3147            }
3148        } else {
3149            0
3150        }
3151    }
3152
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
3155    #[inline]
3156    fn inner(&self) -> Option<WeakInner<'_>> {
3157        let ptr = self.ptr.as_ptr();
3158        if is_dangling(ptr) {
3159            None
3160        } else {
3161            // We are careful to *not* create a reference covering the "data" field, as
3162            // the field may be mutated concurrently (for example, if the last `Arc`
3163            // is dropped, the data field will be dropped in-place).
3164            Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
3165        }
3166    }
3167
    /// Returns `true` if the two `Weak`s point to the same allocation (similar to [`ptr::eq`]), or if
    /// both don't point to any allocation (because they were created with `Weak::new()`). However,
    /// this function ignores the metadata of `dyn Trait` pointers.
3171    ///
3172    /// # Notes
3173    ///
    /// Since this compares pointers, any two `Weak`s created by `Weak::new()` will compare
    /// equal to each other, even though they don't point to any allocation.
3176    ///
3177    /// # Examples
3178    ///
3179    /// ```
3180    /// use std::sync::Arc;
3181    ///
3182    /// let first_rc = Arc::new(5);
3183    /// let first = Arc::downgrade(&first_rc);
3184    /// let second = Arc::downgrade(&first_rc);
3185    ///
3186    /// assert!(first.ptr_eq(&second));
3187    ///
3188    /// let third_rc = Arc::new(5);
3189    /// let third = Arc::downgrade(&third_rc);
3190    ///
3191    /// assert!(!first.ptr_eq(&third));
3192    /// ```
3193    ///
3194    /// Comparing `Weak::new`.
3195    ///
3196    /// ```
3197    /// use std::sync::{Arc, Weak};
3198    ///
3199    /// let first = Weak::new();
3200    /// let second = Weak::new();
3201    /// assert!(first.ptr_eq(&second));
3202    ///
3203    /// let third_rc = Arc::new(());
3204    /// let third = Arc::downgrade(&third_rc);
3205    /// assert!(!first.ptr_eq(&third));
3206    /// ```
3207    ///
3208    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
3209    #[inline]
3210    #[must_use]
3211    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
3212    pub fn ptr_eq(&self, other: &Self) -> bool {
3213        ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
3214    }
3215}
3216
3217#[stable(feature = "arc_weak", since = "1.4.0")]
3218impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
3219    /// Makes a clone of the `Weak` pointer that points to the same allocation.
3220    ///
3221    /// # Examples
3222    ///
3223    /// ```
3224    /// use std::sync::{Arc, Weak};
3225    ///
3226    /// let weak_five = Arc::downgrade(&Arc::new(5));
3227    ///
3228    /// let _ = Weak::clone(&weak_five);
3229    /// ```
3230    #[inline]
3231    fn clone(&self) -> Weak<T, A> {
3232        if let Some(inner) = self.inner() {
3233            // See comments in Arc::clone() for why this is relaxed. This can use a
3234            // fetch_add (ignoring the lock) because the weak count is only locked
            // when there are *no other* weak pointers in existence. (So we can't be
            // running this code in that case.)
3237            let old_size = inner.weak.fetch_add(1, Relaxed);
3238
3239            // See comments in Arc::clone() for why we do this (for mem::forget).
3240            if old_size > MAX_REFCOUNT {
3241                abort();
3242            }
3243        }
3244
3245        Weak { ptr: self.ptr, alloc: self.alloc.clone() }
3246    }
3247}
3248
3249#[unstable(feature = "ergonomic_clones", issue = "132290")]
3250impl<T: ?Sized, A: Allocator + Clone> UseCloned for Weak<T, A> {}
3251
3252#[stable(feature = "downgraded_weak", since = "1.10.0")]
3253impl<T> Default for Weak<T> {
3254    /// Constructs a new `Weak<T>`, without allocating memory.
3255    /// Calling [`upgrade`] on the return value always
3256    /// gives [`None`].
3257    ///
3258    /// [`upgrade`]: Weak::upgrade
3259    ///
3260    /// # Examples
3261    ///
3262    /// ```
3263    /// use std::sync::Weak;
3264    ///
3265    /// let empty: Weak<i64> = Default::default();
3266    /// assert!(empty.upgrade().is_none());
3267    /// ```
3268    fn default() -> Weak<T> {
3269        Weak::new()
3270    }
3271}
3272
3273#[stable(feature = "arc_weak", since = "1.4.0")]
3274unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
3275    /// Drops the `Weak` pointer.
3276    ///
3277    /// # Examples
3278    ///
3279    /// ```
3280    /// use std::sync::{Arc, Weak};
3281    ///
3282    /// struct Foo;
3283    ///
3284    /// impl Drop for Foo {
3285    ///     fn drop(&mut self) {
3286    ///         println!("dropped!");
3287    ///     }
3288    /// }
3289    ///
3290    /// let foo = Arc::new(Foo);
3291    /// let weak_foo = Arc::downgrade(&foo);
3292    /// let other_weak_foo = Weak::clone(&weak_foo);
3293    ///
3294    /// drop(weak_foo);   // Doesn't print anything
3295    /// drop(foo);        // Prints "dropped!"
3296    ///
3297    /// assert!(other_weak_foo.upgrade().is_none());
3298    /// ```
3299    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
3303        //
3304        // It's not necessary to check for the locked state here, because the
3305        // weak count can only be locked if there was precisely one weak ref,
3306        // meaning that drop could only subsequently run ON that remaining weak
3307        // ref, which can only happen after the lock is released.
3308        let inner = if let Some(inner) = self.inner() { inner } else { return };
3309
3310        if inner.weak.fetch_sub(1, Release) == 1 {
3311            acquire!(inner.weak);
3312
3313            // Make sure we aren't trying to "deallocate" the shared static for empty slices
3314            // used by Default::default.
3315            debug_assert!(
3316                !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
3317                "Arc/Weaks backed by a static should never be deallocated. \
3318                Likely decrement_strong_count or from_raw were called too many times.",
3319            );
3320
3321            unsafe {
3322                self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
3323            }
3324        }
3325    }
3326}
3327
3328#[stable(feature = "rust1", since = "1.0.0")]
3329trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3330    fn eq(&self, other: &Arc<T, A>) -> bool;
3331    fn ne(&self, other: &Arc<T, A>) -> bool;
3332}
3333
3334#[stable(feature = "rust1", since = "1.0.0")]
3335impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3336    #[inline]
3337    default fn eq(&self, other: &Arc<T, A>) -> bool {
3338        **self == **other
3339    }
3340    #[inline]
3341    default fn ne(&self, other: &Arc<T, A>) -> bool {
3342        **self != **other
3343    }
3344}
3345
3346/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
3347/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone and also expensive to compare for equality, so the
/// pointer check is more likely to pay off. It's also more likely for two `Arc` clones to point
/// to the same value than for two `&T`s.
3351///
/// We can only do this when `T: Eq`, as a `PartialEq` implementation might be deliberately irreflexive.
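/// For example, `PartialEq` on floats is deliberately irreflexive (`f64::NAN != f64::NAN`),
/// so for `Arc<f64>` a pointer-equality shortcut would wrongly report two clones of
/// `Arc::new(f64::NAN)` as equal.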
3353#[stable(feature = "rust1", since = "1.0.0")]
3354impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3355    #[inline]
3356    fn eq(&self, other: &Arc<T, A>) -> bool {
3357        Arc::ptr_eq(self, other) || **self == **other
3358    }
3359
3360    #[inline]
3361    fn ne(&self, other: &Arc<T, A>) -> bool {
3362        !Arc::ptr_eq(self, other) && **self != **other
3363    }
3364}
3365
3366#[stable(feature = "rust1", since = "1.0.0")]
3367impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3368    /// Equality for two `Arc`s.
3369    ///
3370    /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
3372    ///
3373    /// If `T` also implements `Eq` (implying reflexivity of equality),
3374    /// two `Arc`s that point to the same allocation are always equal.
3375    ///
3376    /// # Examples
3377    ///
3378    /// ```
3379    /// use std::sync::Arc;
3380    ///
3381    /// let five = Arc::new(5);
3382    ///
3383    /// assert!(five == Arc::new(5));
3384    /// ```
3385    #[inline]
3386    fn eq(&self, other: &Arc<T, A>) -> bool {
3387        ArcEqIdent::eq(self, other)
3388    }
3389
3390    /// Inequality for two `Arc`s.
3391    ///
3392    /// Two `Arc`s are not equal if their inner values are not equal.
3393    ///
3394    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same value are never unequal.
3396    ///
3397    /// # Examples
3398    ///
3399    /// ```
3400    /// use std::sync::Arc;
3401    ///
3402    /// let five = Arc::new(5);
3403    ///
3404    /// assert!(five != Arc::new(6));
3405    /// ```
3406    #[inline]
3407    fn ne(&self, other: &Arc<T, A>) -> bool {
3408        ArcEqIdent::ne(self, other)
3409    }
3410}
3411
3412#[stable(feature = "rust1", since = "1.0.0")]
3413impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3414    /// Partial comparison for two `Arc`s.
3415    ///
3416    /// The two are compared by calling `partial_cmp()` on their inner values.
3417    ///
3418    /// # Examples
3419    ///
3420    /// ```
3421    /// use std::sync::Arc;
3422    /// use std::cmp::Ordering;
3423    ///
3424    /// let five = Arc::new(5);
3425    ///
3426    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3427    /// ```
3428    fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3429        (**self).partial_cmp(&**other)
3430    }
3431
3432    /// Less-than comparison for two `Arc`s.
3433    ///
3434    /// The two are compared by calling `<` on their inner values.
3435    ///
3436    /// # Examples
3437    ///
3438    /// ```
3439    /// use std::sync::Arc;
3440    ///
3441    /// let five = Arc::new(5);
3442    ///
3443    /// assert!(five < Arc::new(6));
3444    /// ```
3445    fn lt(&self, other: &Arc<T, A>) -> bool {
3446        *(*self) < *(*other)
3447    }
3448
3449    /// 'Less than or equal to' comparison for two `Arc`s.
3450    ///
3451    /// The two are compared by calling `<=` on their inner values.
3452    ///
3453    /// # Examples
3454    ///
3455    /// ```
3456    /// use std::sync::Arc;
3457    ///
3458    /// let five = Arc::new(5);
3459    ///
3460    /// assert!(five <= Arc::new(5));
3461    /// ```
3462    fn le(&self, other: &Arc<T, A>) -> bool {
3463        *(*self) <= *(*other)
3464    }
3465
3466    /// Greater-than comparison for two `Arc`s.
3467    ///
3468    /// The two are compared by calling `>` on their inner values.
3469    ///
3470    /// # Examples
3471    ///
3472    /// ```
3473    /// use std::sync::Arc;
3474    ///
3475    /// let five = Arc::new(5);
3476    ///
3477    /// assert!(five > Arc::new(4));
3478    /// ```
3479    fn gt(&self, other: &Arc<T, A>) -> bool {
3480        *(*self) > *(*other)
3481    }
3482
3483    /// 'Greater than or equal to' comparison for two `Arc`s.
3484    ///
3485    /// The two are compared by calling `>=` on their inner values.
3486    ///
3487    /// # Examples
3488    ///
3489    /// ```
3490    /// use std::sync::Arc;
3491    ///
3492    /// let five = Arc::new(5);
3493    ///
3494    /// assert!(five >= Arc::new(5));
3495    /// ```
3496    fn ge(&self, other: &Arc<T, A>) -> bool {
3497        *(*self) >= *(*other)
3498    }
3499}

#[stable(feature = "rust1", since = "1.0.0")]
3501impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
3502    /// Comparison for two `Arc`s.
3503    ///
3504    /// The two are compared by calling `cmp()` on their inner values.
3505    ///
3506    /// # Examples
3507    ///
3508    /// ```
3509    /// use std::sync::Arc;
3510    /// use std::cmp::Ordering;
3511    ///
3512    /// let five = Arc::new(5);
3513    ///
3514    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
3515    /// ```
3516    fn cmp(&self, other: &Arc<T, A>) -> Ordering {
3517        (**self).cmp(&**other)
3518    }
3519}

#[stable(feature = "rust1", since = "1.0.0")]
3521impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
3522
3523#[stable(feature = "rust1", since = "1.0.0")]
3524impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
3525    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3526        fmt::Display::fmt(&**self, f)
3527    }
3528}
3529
3530#[stable(feature = "rust1", since = "1.0.0")]
3531impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
3532    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3533        fmt::Debug::fmt(&**self, f)
3534    }
3535}
3536
3537#[stable(feature = "rust1", since = "1.0.0")]
3538impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
3539    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3540        fmt::Pointer::fmt(&(&raw const **self), f)
3541    }
3542}
3543
3544#[cfg(not(no_global_oom_handling))]
3545#[stable(feature = "rust1", since = "1.0.0")]
3546impl<T: Default> Default for Arc<T> {
3547    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
3548    ///
3549    /// # Examples
3550    ///
3551    /// ```
3552    /// use std::sync::Arc;
3553    ///
3554    /// let x: Arc<i32> = Default::default();
3555    /// assert_eq!(*x, 0);
3556    /// ```
3557    fn default() -> Arc<T> {
3558        unsafe {
3559            Self::from_inner(
3560                Box::leak(Box::write(
3561                    Box::new_uninit(),
3562                    ArcInner {
3563                        strong: atomic::AtomicUsize::new(1),
3564                        weak: atomic::AtomicUsize::new(1),
3565                        data: T::default(),
3566                    },
3567                ))
3568                .into(),
3569            )
3570        }
3571    }
3572}
3573
3574/// Struct to hold the static `ArcInner` used for empty `Arc<str/CStr/[T]>` as
3575/// returned by `Default::default`.
3576///
3577/// Layout notes:
3578/// * `repr(align(16))` so we can use it for `[T]` with `align_of::<T>() <= 16`.
3579/// * `repr(C)` so `inner` is at offset 0 (and thus guaranteed to actually be aligned to 16).
3580/// * `[u8; 1]` (to be initialized with 0) so it can be used for `Arc<CStr>`.
3581#[repr(C, align(16))]
3582struct SliceArcInnerForStatic {
3583    inner: ArcInner<[u8; 1]>,
3584}
3585#[cfg(not(no_global_oom_handling))]
3586const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16;
3587
3588static STATIC_INNER_SLICE: SliceArcInnerForStatic = SliceArcInnerForStatic {
3589    inner: ArcInner {
3590        strong: atomic::AtomicUsize::new(1),
3591        weak: atomic::AtomicUsize::new(1),
3592        data: [0],
3593    },
3594};
3595
3596#[cfg(not(no_global_oom_handling))]
3597#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3598impl Default for Arc<str> {
    /// Creates an empty `str` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
3602    #[inline]
3603    fn default() -> Self {
3604        let arc: Arc<[u8]> = Default::default();
3605        debug_assert!(core::str::from_utf8(&*arc).is_ok());
3606        let (ptr, alloc) = Arc::into_inner_with_allocator(arc);
3607        unsafe { Arc::from_ptr_in(ptr.as_ptr() as *mut ArcInner<str>, alloc) }
3608    }
3609}
3610
3611#[cfg(not(no_global_oom_handling))]
3612#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3613impl Default for Arc<core::ffi::CStr> {
    /// Creates an empty `CStr` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
3617    #[inline]
3618    fn default() -> Self {
3619        use core::ffi::CStr;
3620        let inner: NonNull<ArcInner<[u8]>> = NonNull::from(&STATIC_INNER_SLICE.inner);
3621        let inner: NonNull<ArcInner<CStr>> =
3622            NonNull::new(inner.as_ptr() as *mut ArcInner<CStr>).unwrap();
3623        // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3624        let this: mem::ManuallyDrop<Arc<CStr>> =
3625            unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3626        (*this).clone()
3627    }
3628}
3629
3630#[cfg(not(no_global_oom_handling))]
3631#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3632impl<T> Default for Arc<[T]> {
    /// Creates an empty `[T]` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
3636    #[inline]
3637    fn default() -> Self {
3638        if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
3639            // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
3640            // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
3641            // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
3642            // (Note that NonNull::from(&STATIC_INNER_SLICE.inner) is fine under Tree Borrows.)
3643            let inner: NonNull<SliceArcInnerForStatic> = NonNull::from(&STATIC_INNER_SLICE);
3644            let inner: NonNull<ArcInner<[T; 0]>> = inner.cast();
3645            // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3646            let this: mem::ManuallyDrop<Arc<[T; 0]>> =
3647                unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3648            return (*this).clone();
3649        }
3650
3651        // If T's alignment is too large for the static, make a new unique allocation.
3652        let arr: [T; 0] = [];
3653        Arc::from(arr)
3654    }
3655}
3656
3657#[stable(feature = "rust1", since = "1.0.0")]
3658impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
3659    fn hash<H: Hasher>(&self, state: &mut H) {
3660        (**self).hash(state)
3661    }
3662}
3663
3664#[cfg(not(no_global_oom_handling))]
3665#[stable(feature = "from_for_ptrs", since = "1.6.0")]
3666impl<T> From<T> for Arc<T> {
    /// Converts a `T` into an `Arc<T>`.
3668    ///
3669    /// The conversion moves the value into a
3670    /// newly allocated `Arc`. It is equivalent to
3671    /// calling `Arc::new(t)`.
3672    ///
3673    /// # Example
3674    /// ```rust
3675    /// # use std::sync::Arc;
3676    /// let x = 5;
3677    /// let arc = Arc::new(5);
3678    ///
3679    /// assert_eq!(Arc::from(x), arc);
3680    /// ```
3681    fn from(t: T) -> Self {
3682        Arc::new(t)
3683    }
3684}
3685
3686#[cfg(not(no_global_oom_handling))]
3687#[stable(feature = "shared_from_array", since = "1.74.0")]
3688impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
3689    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
3690    ///
3691    /// The conversion moves the array into a newly allocated `Arc`.
3692    ///
3693    /// # Example
3694    ///
3695    /// ```
3696    /// # use std::sync::Arc;
3697    /// let original: [i32; 3] = [1, 2, 3];
3698    /// let shared: Arc<[i32]> = Arc::from(original);
3699    /// assert_eq!(&[1, 2, 3], &shared[..]);
3700    /// ```
3701    #[inline]
3702    fn from(v: [T; N]) -> Arc<[T]> {
3703        Arc::<[T; N]>::from(v)
3704    }
3705}
3706
3707#[cfg(not(no_global_oom_handling))]
3708#[stable(feature = "shared_from_slice", since = "1.21.0")]
3709impl<T: Clone> From<&[T]> for Arc<[T]> {
3710    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3711    ///
3712    /// # Example
3713    ///
3714    /// ```
3715    /// # use std::sync::Arc;
3716    /// let original: &[i32] = &[1, 2, 3];
3717    /// let shared: Arc<[i32]> = Arc::from(original);
3718    /// assert_eq!(&[1, 2, 3], &shared[..]);
3719    /// ```
3720    #[inline]
3721    fn from(v: &[T]) -> Arc<[T]> {
3722        <Self as ArcFromSlice<T>>::from_slice(v)
3723    }
3724}
3725
3726#[cfg(not(no_global_oom_handling))]
3727#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3728impl<T: Clone> From<&mut [T]> for Arc<[T]> {
3729    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3730    ///
3731    /// # Example
3732    ///
3733    /// ```
3734    /// # use std::sync::Arc;
3735    /// let mut original = [1, 2, 3];
3736    /// let original: &mut [i32] = &mut original;
3737    /// let shared: Arc<[i32]> = Arc::from(original);
3738    /// assert_eq!(&[1, 2, 3], &shared[..]);
3739    /// ```
3740    #[inline]
3741    fn from(v: &mut [T]) -> Arc<[T]> {
3742        Arc::from(&*v)
3743    }
3744}
3745
3746#[cfg(not(no_global_oom_handling))]
3747#[stable(feature = "shared_from_slice", since = "1.21.0")]
3748impl From<&str> for Arc<str> {
3749    /// Allocates a reference-counted `str` and copies `v` into it.
3750    ///
3751    /// # Example
3752    ///
3753    /// ```
3754    /// # use std::sync::Arc;
3755    /// let shared: Arc<str> = Arc::from("eggplant");
3756    /// assert_eq!("eggplant", &shared[..]);
3757    /// ```
3758    #[inline]
3759    fn from(v: &str) -> Arc<str> {
3760        let arc = Arc::<[u8]>::from(v.as_bytes());
3761        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
3762    }
3763}
3764
3765#[cfg(not(no_global_oom_handling))]
3766#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3767impl From<&mut str> for Arc<str> {
3768    /// Allocates a reference-counted `str` and copies `v` into it.
3769    ///
3770    /// # Example
3771    ///
3772    /// ```
3773    /// # use std::sync::Arc;
3774    /// let mut original = String::from("eggplant");
3775    /// let original: &mut str = &mut original;
3776    /// let shared: Arc<str> = Arc::from(original);
3777    /// assert_eq!("eggplant", &shared[..]);
3778    /// ```
3779    #[inline]
3780    fn from(v: &mut str) -> Arc<str> {
3781        Arc::from(&*v)
3782    }
3783}
3784
3785#[cfg(not(no_global_oom_handling))]
3786#[stable(feature = "shared_from_slice", since = "1.21.0")]
3787impl From<String> for Arc<str> {
3788    /// Allocates a reference-counted `str` and copies `v` into it.
3789    ///
3790    /// # Example
3791    ///
3792    /// ```
3793    /// # use std::sync::Arc;
3794    /// let unique: String = "eggplant".to_owned();
3795    /// let shared: Arc<str> = Arc::from(unique);
3796    /// assert_eq!("eggplant", &shared[..]);
3797    /// ```
3798    #[inline]
3799    fn from(v: String) -> Arc<str> {
3800        Arc::from(&v[..])
3801    }
3802}
3803
3804#[cfg(not(no_global_oom_handling))]
3805#[stable(feature = "shared_from_slice", since = "1.21.0")]
3806impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
    /// Moves a boxed object to a new, reference-counted allocation.
3808    ///
3809    /// # Example
3810    ///
3811    /// ```
3812    /// # use std::sync::Arc;
3813    /// let unique: Box<str> = Box::from("eggplant");
3814    /// let shared: Arc<str> = Arc::from(unique);
3815    /// assert_eq!("eggplant", &shared[..]);
3816    /// ```
3817    #[inline]
3818    fn from(v: Box<T, A>) -> Arc<T, A> {
3819        Arc::from_box_in(v)
3820    }
3821}
3822
3823#[cfg(not(no_global_oom_handling))]
3824#[stable(feature = "shared_from_slice", since = "1.21.0")]
3825impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
3826    /// Allocates a reference-counted slice and moves `v`'s items into it.
3827    ///
3828    /// # Example
3829    ///
3830    /// ```
3831    /// # use std::sync::Arc;
3832    /// let unique: Vec<i32> = vec![1, 2, 3];
3833    /// let shared: Arc<[i32]> = Arc::from(unique);
3834    /// assert_eq!(&[1, 2, 3], &shared[..]);
3835    /// ```
3836    #[inline]
3837    fn from(v: Vec<T, A>) -> Arc<[T], A> {
3838        unsafe {
3839            let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
3840
3841            let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
3842            ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);
3843
3844            // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
3845            // without dropping its contents or the allocator
3846            let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
3847
3848            Self::from_ptr_in(rc_ptr, alloc)
3849        }
3850    }
3851}
3852
3853#[stable(feature = "shared_from_cow", since = "1.45.0")]
3854impl<'a, B> From<Cow<'a, B>> for Arc<B>
3855where
3856    B: ToOwned + ?Sized,
3857    Arc<B>: From<&'a B> + From<B::Owned>,
3858{
3859    /// Creates an atomically reference-counted pointer from a clone-on-write
3860    /// pointer by copying its content.
3861    ///
3862    /// # Example
3863    ///
3864    /// ```rust
3865    /// # use std::sync::Arc;
3866    /// # use std::borrow::Cow;
3867    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
3868    /// let shared: Arc<str> = Arc::from(cow);
3869    /// assert_eq!("eggplant", &shared[..]);
3870    /// ```
3871    #[inline]
3872    fn from(cow: Cow<'a, B>) -> Arc<B> {
3873        match cow {
3874            Cow::Borrowed(s) => Arc::from(s),
3875            Cow::Owned(s) => Arc::from(s),
3876        }
3877    }
3878}
3879
3880#[stable(feature = "shared_from_str", since = "1.62.0")]
3881impl From<Arc<str>> for Arc<[u8]> {
3882    /// Converts an atomically reference-counted string slice into a byte slice.
3883    ///
3884    /// # Example
3885    ///
3886    /// ```
3887    /// # use std::sync::Arc;
3888    /// let string: Arc<str> = Arc::from("eggplant");
3889    /// let bytes: Arc<[u8]> = Arc::from(string);
3890    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
3891    /// ```
3892    #[inline]
3893    fn from(rc: Arc<str>) -> Self {
3894        // SAFETY: `str` has the same layout as `[u8]`.
3895        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
3896    }
3897}
3898
3899#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
3900impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
3901    type Error = Arc<[T], A>;
3902
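    /// Converts an `Arc<[T]>` into an `Arc<[T; N]>` if the lengths match,
    /// returning the original `Arc<[T]>` otherwise.
    ///
    /// # Examples
    ///
    /// A small sketch of the fallible conversion:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    ///
    /// // A length mismatch hands the original `Arc<[T]>` back as the error.
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// assert!(Arc::<[i32; 4]>::try_from(slice).is_err());
    /// ```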
3903    fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
3904        if boxed_slice.len() == N {
3905            let (ptr, alloc) = Arc::into_inner_with_allocator(boxed_slice);
3906            Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
3907        } else {
3908            Err(boxed_slice)
3909        }
3910    }
3911}
3912
3913#[cfg(not(no_global_oom_handling))]
3914#[stable(feature = "shared_from_iter", since = "1.37.0")]
3915impl<T> FromIterator<T> for Arc<[T]> {
3916    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
3917    ///
3918    /// # Performance characteristics
3919    ///
3920    /// ## The general case
3921    ///
3922    /// In the general case, collecting into `Arc<[T]>` is done by first
3923    /// collecting into a `Vec<T>`. That is, when writing the following:
3924    ///
3925    /// ```rust
3926    /// # use std::sync::Arc;
3927    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
3928    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3929    /// ```
3930    ///
3931    /// this behaves as if we wrote:
3932    ///
3933    /// ```rust
3934    /// # use std::sync::Arc;
3935    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
3936    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
3937    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
3938    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3939    /// ```
3940    ///
3941    /// This will allocate as many times as needed for constructing the `Vec<T>`
3942    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
3943    ///
3944    /// ## Iterators of known length
3945    ///
3946    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
3947    /// a single allocation will be made for the `Arc<[T]>`. For example:
3948    ///
3949    /// ```rust
3950    /// # use std::sync::Arc;
3951    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
3952    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
3953    /// ```
3954    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
3955        ToArcSlice::to_arc_slice(iter.into_iter())
3956    }
3957}
3958
3959#[cfg(not(no_global_oom_handling))]
3960/// Specialization trait used for collecting into `Arc<[T]>`.
3961trait ToArcSlice<T>: Iterator<Item = T> + Sized {
3962    fn to_arc_slice(self) -> Arc<[T]>;
3963}
3964
3965#[cfg(not(no_global_oom_handling))]
3966impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
3967    default fn to_arc_slice(self) -> Arc<[T]> {
3968        self.collect::<Vec<T>>().into()
3969    }
3970}
3971
3972#[cfg(not(no_global_oom_handling))]
3973impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
3974    fn to_arc_slice(self) -> Arc<[T]> {
3975        // This is the case for a `TrustedLen` iterator.
3976        let (low, high) = self.size_hint();
3977        if let Some(high) = high {
3978            debug_assert_eq!(
3979                low,
3980                high,
3981                "TrustedLen iterator's size hint is not exact: {:?}",
3982                (low, high)
3983            );
3984
3985            unsafe {
                // SAFETY: `TrustedLen` guarantees that `low` is exactly the iterator's length.
3987                Arc::from_iter_exact(self, low)
3988            }
3989        } else {
3990            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
3991            // length exceeding `usize::MAX`.
3992            // The default implementation would collect into a vec which would panic.
3993            // Thus we panic here immediately without invoking `Vec` code.
3994            panic!("capacity overflow");
3995        }
3996    }
3997}
3998
3999#[stable(feature = "rust1", since = "1.0.0")]
4000impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
4001    fn borrow(&self) -> &T {
4002        &**self
4003    }
4004}
4005
4006#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
4007impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
4008    fn as_ref(&self) -> &T {
4009        &**self
4010    }
4011}
4012
4013#[stable(feature = "pin", since = "1.33.0")]
4014impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}
4015
4016/// Gets the offset within an `ArcInner` for the payload behind a pointer.
4017///
4018/// # Safety
4019///
4020/// The pointer must point to (and have valid metadata for) a previously
4021/// valid instance of T, but the T is allowed to be dropped.
4022unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
4023    // Align the unsized value to the end of the ArcInner.
    // Because ArcInner is repr(C), the unsized field will always be the last field in memory.
4025    // SAFETY: since the only unsized types possible are slices, trait objects,
4026    // and extern types, the input safety requirement is currently enough to
4027    // satisfy the requirements of align_of_val_raw; this is an implementation
4028    // detail of the language that must not be relied upon outside of std.
4029    unsafe { data_offset_align(align_of_val_raw(ptr)) }
4030}
4031
4032#[inline]
4033fn data_offset_align(align: usize) -> usize {
4034    let layout = Layout::new::<ArcInner<()>>();
4035    layout.size() + layout.padding_needed_for(align)
4036}
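
// As a worked example (assuming a typical 64-bit target): `ArcInner<()>` holds two
// `AtomicUsize` counters, so `layout` has size 16 and align 8. For a payload type with
// alignment 32, `padding_needed_for(32)` applied to a size of 16 yields 16, so the
// payload lives at offset 16 + 16 = 32 from the start of the allocation.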
4037
4038/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
4039/// but will deallocate it (without dropping the value) when dropped.
4040///
4041/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
4042#[cfg(not(no_global_oom_handling))]
4043struct UniqueArcUninit<T: ?Sized, A: Allocator> {
4044    ptr: NonNull<ArcInner<T>>,
4045    layout_for_value: Layout,
4046    alloc: Option<A>,
4047}
4048
4049#[cfg(not(no_global_oom_handling))]
4050impl<T: ?Sized, A: Allocator> UniqueArcUninit<T, A> {
4051    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
4052    fn new(for_value: &T, alloc: A) -> UniqueArcUninit<T, A> {
4053        let layout = Layout::for_value(for_value);
4054        let ptr = unsafe {
4055            Arc::allocate_for_layout(
4056                layout,
4057                |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
4058                |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
4059            )
4060        };
4061        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) }
4062    }
4063
4064    /// Returns the pointer to be written into to initialize the [`Arc`].
4065    fn data_ptr(&mut self) -> *mut T {
4066        let offset = data_offset_align(self.layout_for_value.align());
4067        unsafe { self.ptr.as_ptr().byte_add(offset) as *mut T }
4068    }
4069
4070    /// Upgrade this into a normal [`Arc`].
4071    ///
4072    /// # Safety
4073    ///
4074    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
4075    unsafe fn into_arc(self) -> Arc<T, A> {
4076        let mut this = ManuallyDrop::new(self);
4077        let ptr = this.ptr.as_ptr();
4078        let alloc = this.alloc.take().unwrap();
4079
4080        // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
4081        // for having initialized the data.
4082        unsafe { Arc::from_ptr_in(ptr, alloc) }
4083    }
4084}
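
// A sketch of the intended flow: allocate with `UniqueArcUninit::new`, write an
// initialized `T` through `data_ptr()`, then call `into_arc()`. If initialization
// (e.g. a clone) panics before `into_arc()` runs, the `Drop` impl below frees the
// buffer without attempting to drop the still-uninitialized payload.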
4085
4086#[cfg(not(no_global_oom_handling))]
4087impl<T: ?Sized, A: Allocator> Drop for UniqueArcUninit<T, A> {
4088    fn drop(&mut self) {
4089        // SAFETY:
4090        // * new() produced a pointer safe to deallocate.
4091        // * We own the pointer unless into_arc() was called, which forgets us.
4092        unsafe {
4093            self.alloc.take().unwrap().deallocate(
4094                self.ptr.cast(),
4095                arcinner_layout_for_value_layout(self.layout_for_value),
4096            );
4097        }
4098    }
4099}
4100
4101#[stable(feature = "arc_error", since = "1.52.0")]
4102impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
4103    #[allow(deprecated, deprecated_in_future)]
4104    fn description(&self) -> &str {
4105        core::error::Error::description(&**self)
4106    }
4107
4108    #[allow(deprecated)]
4109    fn cause(&self) -> Option<&dyn core::error::Error> {
4110        core::error::Error::cause(&**self)
4111    }
4112
4113    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
4114        core::error::Error::source(&**self)
4115    }
4116
4117    fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
4118        core::error::Error::provide(&**self, req);
4119    }
4120}
4121
4122/// A uniquely owned [`Arc`].
4123///
/// This represents an `Arc` that is known to be uniquely owned -- that is, to have exactly one strong
4125/// reference. Multiple weak pointers can be created, but attempts to upgrade those to strong
4126/// references will fail unless the `UniqueArc` they point to has been converted into a regular `Arc`.
4127///
4128/// Because it is uniquely owned, the contents of a `UniqueArc` can be freely mutated. A common
4129/// use case is to have an object be mutable during its initialization phase but then have it become
4130/// immutable and converted to a normal `Arc`.
4131///
4132/// This can be used as a flexible way to create cyclic data structures, as in the example below.
4133///
4134/// ```
4135/// #![feature(unique_rc_arc)]
4136/// use std::sync::{Arc, Weak, UniqueArc};
4137///
4138/// struct Gadget {
4139///     me: Weak<Gadget>,
4140/// }
4141///
4142/// fn create_gadget() -> Option<Arc<Gadget>> {
4143///     let mut rc = UniqueArc::new(Gadget {
4144///         me: Weak::new(),
4145///     });
4146///     rc.me = UniqueArc::downgrade(&rc);
4147///     Some(UniqueArc::into_arc(rc))
4148/// }
4149///
4150/// create_gadget().unwrap();
4151/// ```
4152///
4153/// An advantage of using `UniqueArc` over [`Arc::new_cyclic`] to build cyclic data structures is that
4154/// [`Arc::new_cyclic`]'s `data_fn` parameter cannot be async or return a [`Result`]. As shown in the
4155/// previous example, `UniqueArc` allows for more flexibility in the construction of cyclic data,
4156/// including fallible or async constructors.
4157#[unstable(feature = "unique_rc_arc", issue = "112566")]
4158pub struct UniqueArc<
4159    T: ?Sized,
4160    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
4161> {
4162    ptr: NonNull<ArcInner<T>>,
4163    // Define the ownership of `ArcInner<T>` for drop-check
4164    _marker: PhantomData<ArcInner<T>>,
4165    // Invariance is necessary for soundness: once other `Weak`
4166    // references exist, we already have a form of shared mutability!
4167    _marker2: PhantomData<*mut T>,
4168    alloc: A,
4169}
4170
4171#[unstable(feature = "unique_rc_arc", issue = "112566")]
4172unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for UniqueArc<T, A> {}
4173
4174#[unstable(feature = "unique_rc_arc", issue = "112566")]
4175unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for UniqueArc<T, A> {}
4176
4177#[unstable(feature = "unique_rc_arc", issue = "112566")]
4178// #[unstable(feature = "coerce_unsized", issue = "18598")]
4179impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<UniqueArc<U, A>>
4180    for UniqueArc<T, A>
4181{
4182}
4183
4184//#[unstable(feature = "unique_rc_arc", issue = "112566")]
4185#[unstable(feature = "dispatch_from_dyn", issue = "none")]
4186impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<UniqueArc<U>> for UniqueArc<T> {}
4187
4188#[unstable(feature = "unique_rc_arc", issue = "112566")]
4189impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for UniqueArc<T, A> {
4190    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4191        fmt::Display::fmt(&**self, f)
4192    }
4193}
4194
4195#[unstable(feature = "unique_rc_arc", issue = "112566")]
4196impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for UniqueArc<T, A> {
4197    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4198        fmt::Debug::fmt(&**self, f)
4199    }
4200}
4201
4202#[unstable(feature = "unique_rc_arc", issue = "112566")]
4203impl<T: ?Sized, A: Allocator> fmt::Pointer for UniqueArc<T, A> {
4204    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4205        fmt::Pointer::fmt(&(&raw const **self), f)
4206    }
4207}
4208
4209#[unstable(feature = "unique_rc_arc", issue = "112566")]
4210impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for UniqueArc<T, A> {
4211    fn borrow(&self) -> &T {
4212        &**self
4213    }
4214}
4215
4216#[unstable(feature = "unique_rc_arc", issue = "112566")]
4217impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for UniqueArc<T, A> {
4218    fn borrow_mut(&mut self) -> &mut T {
4219        &mut **self
4220    }
4221}
4222
4223#[unstable(feature = "unique_rc_arc", issue = "112566")]
4224impl<T: ?Sized, A: Allocator> AsRef<T> for UniqueArc<T, A> {
4225    fn as_ref(&self) -> &T {
4226        &**self
4227    }
4228}
4229
4230#[unstable(feature = "unique_rc_arc", issue = "112566")]
4231impl<T: ?Sized, A: Allocator> AsMut<T> for UniqueArc<T, A> {
4232    fn as_mut(&mut self) -> &mut T {
4233        &mut **self
4234    }
4235}
4236
#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Unpin for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for UniqueArc<T, A> {
    /// Equality for two `UniqueArc`s.
    ///
    /// Two `UniqueArc`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five == UniqueArc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&**self, &**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for UniqueArc<T, A> {
    /// Partial comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&UniqueArc::new(6)));
    /// ```
    #[inline(always)]
    fn partial_cmp(&self, other: &UniqueArc<T, A>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five < UniqueArc::new(6));
    /// ```
    #[inline(always)]
    fn lt(&self, other: &UniqueArc<T, A>) -> bool {
        **self < **other
    }

    /// 'Less than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five <= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn le(&self, other: &UniqueArc<T, A>) -> bool {
        **self <= **other
    }

    /// Greater-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five > UniqueArc::new(4));
    /// ```
    #[inline(always)]
    fn gt(&self, other: &UniqueArc<T, A>) -> bool {
        **self > **other
    }

    /// 'Greater than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five >= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn ge(&self, other: &UniqueArc<T, A>) -> bool {
        **self >= **other
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Ord, A: Allocator> Ord for UniqueArc<T, A> {
    /// Comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&UniqueArc::new(6)));
    /// ```
    #[inline]
    fn cmp(&self, other: &UniqueArc<T, A>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Eq, A: Allocator> Eq for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Hash, A: Allocator> Hash for UniqueArc<T, A> {
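    /// Hashing for a `UniqueArc` delegates to the inner value.
    ///
    /// # Examples
    ///
    /// A minimal sketch: a `UniqueArc<T>` hashes exactly like the `T` it holds.
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::hash::{BuildHasher, RandomState};
    /// use std::sync::UniqueArc;
    ///
    /// let s = RandomState::new();
    /// // The delegating impl below makes these two hashes identical.
    /// assert_eq!(s.hash_one(UniqueArc::new(5)), s.hash_one(5));
    /// ```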
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}

impl<T> UniqueArc<T, Global> {
    /// Creates a new `UniqueArc`.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
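    ///
    /// # Examples
    ///
    /// A minimal usage sketch: while the value is still uniquely owned, it can be
    /// mutated directly through `DerefMut`, with no synchronization needed.
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let mut five = UniqueArc::new(5);
    /// // No other strong reference can exist yet, so plain mutation is fine.
    /// *five += 1;
    /// assert_eq!(*five, 6);
    /// ```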
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn new(value: T) -> Self {
        Self::new_in(value, Global)
    }
}

impl<T, A: Allocator> UniqueArc<T, A> {
    /// Creates a new `UniqueArc` in the provided allocator.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
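    ///
    /// # Examples
    ///
    /// A minimal sketch; it additionally assumes the unstable `allocator_api`
    /// feature in order to name the [`System`](std::alloc::System) allocator.
    ///
    /// ```
    /// #![feature(unique_rc_arc, allocator_api)]
    /// use std::alloc::System;
    /// use std::sync::UniqueArc;
    ///
    /// // Allocate the inner value with the system allocator instead of `Global`.
    /// let five = UniqueArc::new_in(5, System);
    /// assert_eq!(*five, 5);
    /// ```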
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    // #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Self {
        let (ptr, alloc) = Box::into_unique(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                // Keep one implicit weak reference so that the `UniqueArc` stays
                // valid even if every weak pointer created from it is dropped.
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        ));
        Self { ptr: ptr.into(), _marker: PhantomData, _marker2: PhantomData, alloc }
    }
}

impl<T: ?Sized, A: Allocator> UniqueArc<T, A> {
    /// Converts the `UniqueArc` into a regular [`Arc`].
    ///
    /// This consumes the `UniqueArc` and returns a regular [`Arc`] containing the same inner value.
    ///
    /// Any weak references created before this method is called can now be upgraded to strong
    /// references.
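    ///
    /// # Examples
    ///
    /// A minimal sketch of the weak-reference lifecycle around the conversion:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// // Upgrading fails while the value is still uniquely owned.
    /// assert!(weak.upgrade().is_none());
    ///
    /// let arc = UniqueArc::into_arc(unique);
    /// // After the conversion, the same weak reference points to the new `Arc`.
    /// assert_eq!(*weak.upgrade().unwrap(), 5);
    /// assert_eq!(*arc, 5);
    /// ```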
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn into_arc(this: Self) -> Arc<T, A> {
        let this = ManuallyDrop::new(this);

        // Move the allocator out.
        // SAFETY: `this.alloc` will not be accessed again, nor dropped because it is in
        // a `ManuallyDrop`.
        let alloc: A = unsafe { ptr::read(&this.alloc) };

        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe {
            // Convert our weak reference into a strong reference.
            (*this.ptr.as_ptr()).strong.store(1, Release);
            Arc::from_inner_in(this.ptr, alloc)
        }
    }
}

impl<T: ?Sized, A: Allocator + Clone> UniqueArc<T, A> {
    /// Creates a new weak reference to the `UniqueArc`.
    ///
    /// Attempting to upgrade this weak reference will fail before the `UniqueArc` has been converted
    /// to an [`Arc`] using [`UniqueArc::into_arc`].
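    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// // The weak reference cannot be upgraded until `UniqueArc::into_arc` is called.
    /// assert!(weak.upgrade().is_none());
    /// ```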
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn downgrade(this: &Self) -> Weak<T, A> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object or converting the object to a normal `Arc<T, A>`.
        //
        // Note that we don't need to test if the weak counter is locked because there
        // are no such operations like `Arc::get_mut` or `Arc::make_mut` that will lock
        // the weak counter.
        //
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let old_size = unsafe { (*this.ptr.as_ptr()).weak.fetch_add(1, Relaxed) };

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            abort();
        }

        Weak { ptr: this.ptr, alloc: this.alloc.clone() }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Deref for UniqueArc<T, A> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe { &self.ptr.as_ref().data }
    }
}

// #[unstable(feature = "unique_rc_arc", issue = "112566")]
#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
unsafe impl<T: ?Sized> PinCoerceUnsized for UniqueArc<T> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> DerefMut for UniqueArc<T, A> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid. We know we
        // have unique ownership and therefore it's safe to make a mutable reference because
        // `UniqueArc` owns the only strong reference to itself.
        // We also need to be careful to only create a mutable reference to the `data` field,
        // as a mutable reference to the entire `ArcInner` would assert uniqueness over the
        // ref count fields too, invalidating any attempt by `Weak`s to access the ref count.
        unsafe { &mut (*self.ptr.as_ptr()).data }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
// #[unstable(feature = "deref_pure_trait", issue = "87121")]
unsafe impl<T: ?Sized, A: Allocator> DerefPure for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for UniqueArc<T, A> {
    fn drop(&mut self) {
        // See `Arc::drop_slow` which drops an `Arc` with a strong count of 0.
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        // `_weak` takes over the implicit weak reference: when it is dropped at the end
        // of this scope (after the data has been dropped below), it decrements the weak
        // count and frees the allocation once no other `Weak`s remain.
        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };

        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
    }
}