diff --git a/layout/style/ServoStyleConstsInlines.h b/layout/style/ServoStyleConstsInlines.h index c1be677ed1db205c5e731991c922652925ea071f..0eea3f40c08f7f8b0290ed3f20ccfdbe0f56e319 100644 --- a/layout/style/ServoStyleConstsInlines.h +++ b/layout/style/ServoStyleConstsInlines.h @@ -175,22 +175,40 @@ inline bool StyleArcInner<T>::DecrementRef() { return true; } -static constexpr const uint64_t kArcSliceCanary = 0xf3f3f3f3f3f3f3f3; +template <typename H, typename T> +inline bool StyleHeaderSlice<H, T>::operator==( + const StyleHeaderSlice& aOther) const { + return header == aOther.header && AsSpan() == aOther.AsSpan(); +} -#define ASSERT_CANARY \ - MOZ_DIAGNOSTIC_ASSERT(_0.ptr->data.header.header == kArcSliceCanary, "Uh?"); +template <typename H, typename T> +inline bool StyleHeaderSlice<H, T>::operator!=( + const StyleHeaderSlice& aOther) const { + return !(*this == aOther); +} -template <typename T> -inline StyleArcSlice<T>::StyleArcSlice() { - _0.ptr = reinterpret_cast<decltype(_0.ptr)>(Servo_StyleArcSlice_EmptyPtr()); - ASSERT_CANARY +template <typename H, typename T> +inline StyleHeaderSlice<H, T>::~StyleHeaderSlice() { + for (T& elem : Span(data, len)) { + elem.~T(); + } } +template <typename H, typename T> +inline Span<const T> StyleHeaderSlice<H, T>::AsSpan() const { + // Explicitly specify template argument here to avoid instantiating Span<T> + // first and then implicitly converting to Span<const T> + return Span<const T>{data, len}; +} + +static constexpr const uint64_t kArcSliceCanary = 0xf3f3f3f3f3f3f3f3; + +#define ASSERT_CANARY \ + MOZ_DIAGNOSTIC_ASSERT(_0.p->data.header == kArcSliceCanary, "Uh?"); + template <typename T> -inline StyleArcSlice<T>::StyleArcSlice(const StyleArcSlice& aOther) { - MOZ_DIAGNOSTIC_ASSERT(aOther._0.ptr); - _0.ptr = aOther._0.ptr; - _0.ptr->IncrementRef(); +inline StyleArcSlice<T>::StyleArcSlice() + : _0(reinterpret_cast<decltype(_0.p)>(Servo_StyleArcSlice_EmptyPtr())) { ASSERT_CANARY } @@ -198,82 +216,26 @@ template <typename T> inline StyleArcSlice<T>::StyleArcSlice( const StyleForgottenArcSlicePtr<T>& aPtr) { // See the forget() implementation to see why reinterpret_cast() is ok. - _0.ptr = reinterpret_cast<decltype(_0.ptr)>(aPtr._0); + _0.p = reinterpret_cast<decltype(_0.p)>(aPtr._0); ASSERT_CANARY } template <typename T> inline size_t StyleArcSlice<T>::Length() const { ASSERT_CANARY - return _0.ptr->data.header.length; + return _0->Length(); } template <typename T> inline bool StyleArcSlice<T>::IsEmpty() const { ASSERT_CANARY - return Length() == 0; + return _0->IsEmpty(); } template <typename T> inline Span<const T> StyleArcSlice<T>::AsSpan() const { ASSERT_CANARY - // Explicitly specify template argument here to avoid instantiating Span<T> - // first and then implicitly converting to Span<const T> - return Span<const T>{_0.ptr->data.slice, Length()}; -} - -template <typename T> -inline bool StyleArcSlice<T>::operator==(const StyleArcSlice& aOther) const { - ASSERT_CANARY - return _0.ptr == aOther._0.ptr || AsSpan() == aOther.AsSpan(); -} - -template <typename T> -inline bool StyleArcSlice<T>::operator!=(const StyleArcSlice& aOther) const { - return !(*this == aOther); -} - -template <typename T> -inline void StyleArcSlice<T>::Release() { - ASSERT_CANARY - if (MOZ_LIKELY(!_0.ptr->DecrementRef())) { - return; - } - for (T& elem : Span(_0.ptr->data.slice, Length())) { - elem.~T(); - } - free(_0.ptr); // Drop the allocation now. 
-} - -template <typename T> -inline StyleArcSlice<T>::~StyleArcSlice() { - Release(); -} - -template <typename T> -inline StyleArcSlice<T>& StyleArcSlice<T>::operator=(StyleArcSlice&& aOther) { - ASSERT_CANARY - std::swap(_0.ptr, aOther._0.ptr); - ASSERT_CANARY - return *this; -} - -template <typename T> -inline StyleArcSlice<T>& StyleArcSlice<T>::operator=( - const StyleArcSlice& aOther) { - ASSERT_CANARY - - if (_0.ptr == aOther._0.ptr) { - return *this; - } - - Release(); - - _0.ptr = aOther._0.ptr; - _0.ptr->IncrementRef(); - - ASSERT_CANARY - return *this; + return _0->AsSpan(); } #undef ASSERT_CANARY diff --git a/servo/components/selectors/builder.rs b/servo/components/selectors/builder.rs index 2a164a9ebae11d78860b9e79bd9e544113474532..b91b3f28cb03f01d84fcfb6ba84904b9bdc320bc 100644 --- a/servo/components/selectors/builder.rs +++ b/servo/components/selectors/builder.rs @@ -19,7 +19,7 @@ use crate::parser::{Combinator, Component, RelativeSelector, Selector, SelectorImpl}; use crate::sink::Push; -use servo_arc::{Arc, HeaderWithLength, ThinArc}; +use servo_arc::{Arc, ThinArc}; use smallvec::{self, SmallVec}; use std::cmp; use std::iter; @@ -105,32 +105,22 @@ impl<Impl: SelectorImpl> SelectorBuilder<Impl> { &mut self, spec: SpecificityAndFlags, ) -> ThinArc<SpecificityAndFlags, Component<Impl>> { - // First, compute the total number of Components we'll need to allocate - // space for. - let full_len = self.simple_selectors.len() + self.combinators.len(); - - // Create the header. - let header = HeaderWithLength::new(spec, full_len); - // Create the Arc using an iterator that drains our buffers. - - // Use a raw pointer to be able to call set_len despite "borrowing" the slice. - // This is similar to SmallVec::drain, but we use a slice here because - // we’re gonna traverse it non-linearly. - let raw_simple_selectors: *const [Component<Impl>] = &*self.simple_selectors; - unsafe { - // Panic-safety: if SelectorBuilderIter is not iterated to the end, - // some simple selectors will safely leak. - self.simple_selectors.set_len(0) - } - let (rest, current) = split_from_end(unsafe { &*raw_simple_selectors }, self.current_len); + // Panic-safety: if SelectorBuilderIter is not iterated to the end, some simple selectors + // will safely leak. 
+ let raw_simple_selectors = unsafe { + let simple_selectors_len = self.simple_selectors.len(); + self.simple_selectors.set_len(0); + std::slice::from_raw_parts(self.simple_selectors.as_ptr(), simple_selectors_len) + }; + let (rest, current) = split_from_end(raw_simple_selectors, self.current_len); let iter = SelectorBuilderIter { current_simple_selectors: current.iter(), rest_of_simple_selectors: rest, combinators: self.combinators.drain(..).rev(), }; - Arc::into_thin(Arc::from_header_and_iter(header, iter)) + Arc::from_header_and_iter(spec, iter) } } diff --git a/servo/components/selectors/parser.rs b/servo/components/selectors/parser.rs index e1b075ccad1f2e2c841efafaef08b4329cd31476..ca587e9846890f00b53ef4fd8033e8811b3269d5 100644 --- a/servo/components/selectors/parser.rs +++ b/servo/components/selectors/parser.rs @@ -19,7 +19,7 @@ use cssparser::{BasicParseError, BasicParseErrorKind, ParseError, ParseErrorKind use cssparser::{CowRcStr, Delimiter, SourceLocation}; use cssparser::{Parser as CssParser, ToCss, Token}; use precomputed_hash::PrecomputedHash; -use servo_arc::{HeaderWithLength, ThinArc, UniqueArc}; +use servo_arc::{ThinArc, UniqueArc}; use smallvec::SmallVec; use std::borrow::{Borrow, Cow}; use std::fmt::{self, Debug}; @@ -640,7 +640,7 @@ pub struct Selector<Impl: SelectorImpl>( impl<Impl: SelectorImpl> Selector<Impl> { /// See Arc::mark_as_intentionally_leaked pub fn mark_as_intentionally_leaked(&self) { - self.0.with_arc(|a| a.mark_as_intentionally_leaked()) + self.0.mark_as_intentionally_leaked() } fn ampersand() -> Self { @@ -655,32 +655,32 @@ impl<Impl: SelectorImpl> Selector<Impl> { #[inline] pub fn specificity(&self) -> u32 { - self.0.header.header.specificity() + self.0.header.specificity() } #[inline] fn flags(&self) -> SelectorFlags { - self.0.header.header.flags + self.0.header.flags } #[inline] pub fn has_pseudo_element(&self) -> bool { - self.0.header.header.has_pseudo_element() + self.0.header.has_pseudo_element() } #[inline] pub fn has_parent_selector(&self) -> bool { - self.0.header.header.has_parent_selector() + self.0.header.has_parent_selector() } #[inline] pub fn is_slotted(&self) -> bool { - self.0.header.header.is_slotted() + self.0.header.is_slotted() } #[inline] pub fn is_part(&self) -> bool { - self.0.header.header.is_part() + self.0.header.is_part() } #[inline] @@ -770,7 +770,7 @@ impl<Impl: SelectorImpl> Selector<Impl> { } SelectorIter { - iter: self.0.slice[..self.len() - 2].iter(), + iter: self.0.slice()[..self.len() - 2].iter(), next_combinator: None, } } @@ -802,7 +802,7 @@ impl<Impl: SelectorImpl> Selector<Impl> { /// skipping the rightmost |offset| Components. #[inline] pub fn iter_from(&self, offset: usize) -> SelectorIter<Impl> { - let iter = self.0.slice[offset..].iter(); + let iter = self.0.slice()[offset..].iter(); SelectorIter { iter, next_combinator: None, @@ -813,7 +813,7 @@ impl<Impl: SelectorImpl> Selector<Impl> { /// or panics if the component is not a combinator. #[inline] pub fn combinator_at_match_order(&self, index: usize) -> Combinator { - match self.0.slice[index] { + match self.0.slice()[index] { Component::Combinator(c) => c, ref other => panic!( "Not a combinator: {:?}, {:?}, index: {}", @@ -826,14 +826,14 @@ impl<Impl: SelectorImpl> Selector<Impl> { /// combinators, in matching order (from right to left). 
#[inline] pub fn iter_raw_match_order(&self) -> slice::Iter<Component<Impl>> { - self.0.slice.iter() + self.0.slice().iter() } /// Returns the combinator at index `index` (zero-indexed from the left), /// or panics if the component is not a combinator. #[inline] pub fn combinator_at_parse_order(&self, index: usize) -> Combinator { - match self.0.slice[self.len() - index - 1] { + match self.0.slice()[self.len() - index - 1] { Component::Combinator(c) => c, ref other => panic!( "Not a combinator: {:?}, {:?}, index: {}", @@ -847,7 +847,7 @@ impl<Impl: SelectorImpl> Selector<Impl> { /// `offset`. #[inline] pub fn iter_raw_parse_order_from(&self, offset: usize) -> Rev<slice::Iter<Component<Impl>>> { - self.0.slice[..self.len() - offset].iter().rev() + self.0.slice()[..self.len() - offset].iter().rev() } /// Creates a Selector from a vec of Components, specified in parse order. Used in tests. @@ -972,8 +972,7 @@ impl<Impl: SelectorImpl> Selector<Impl> { .chain(std::iter::once(Component::Is( parent.to_vec().into_boxed_slice(), ))); - let header = HeaderWithLength::new(specificity_and_flags, len); - UniqueArc::from_header_and_iter_with_size(header, iter, len) + UniqueArc::from_header_and_iter_with_size(specificity_and_flags, iter, len) } else { let iter = self.iter_raw_match_order().map(|component| { use self::Component::*; @@ -1065,17 +1064,16 @@ impl<Impl: SelectorImpl> Selector<Impl> { )), } }); - let header = HeaderWithLength::new(specificity_and_flags, iter.len()); - UniqueArc::from_header_and_iter(header, iter) + UniqueArc::from_header_and_iter(specificity_and_flags, iter) }; items.header_mut().specificity = specificity.into(); - Selector(items.shareable_thin()) + Selector(items.shareable()) } /// Returns count of simple selectors and combinators in the Selector. #[inline] pub fn len(&self) -> usize { - self.0.slice.len() + self.0.len() } /// Returns the address on the heap of the ThinArc for memory reporting. @@ -1474,13 +1472,13 @@ impl<Impl: SelectorImpl> NthOfSelectorData<Impl> { /// Returns the An+B part of the selector #[inline] pub fn nth_data(&self) -> &NthSelectorData { - &self.0.header.header + &self.0.header } /// Returns the selector list part of the selector #[inline] pub fn selectors(&self) -> &[Selector<Impl>] { - &self.0.slice + self.0.slice() } } diff --git a/servo/components/servo_arc/lib.rs b/servo/components/servo_arc/lib.rs index cc71827283aa2df222d70a232899c8d77ce42811..509c6b136b011dd83fd3b795eb636246cf21ba4a 100644 --- a/servo/components/servo_arc/lib.rs +++ b/servo/components/servo_arc/lib.rs @@ -45,7 +45,6 @@ use std::ops::{Deref, DerefMut}; use std::os::raw::c_void; use std::process; use std::ptr; -use std::slice; use std::sync::atomic; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; use std::{isize, usize}; @@ -351,6 +350,11 @@ impl<T: ?Sized> Arc<T> { fn ptr(&self) -> *mut ArcInner<T> { self.p.as_ptr() } + + /// Returns a raw ptr to the underlying allocation. + pub fn raw_ptr(&self) -> *const c_void { + self.p.as_ptr() as *const _ + } } #[cfg(feature = "gecko_refcount_logging")] @@ -643,14 +647,68 @@ impl<T: Serialize> Serialize for Arc<T> { /// Structure to allow Arc-managing some fixed-sized data and a variably-sized /// slice in a single allocation. -#[derive(Debug, Eq, PartialEq, PartialOrd)] +/// +/// cbindgen:derive-eq=false +/// cbindgen:derive-neq=false +#[derive(Debug, Eq)] #[repr(C)] -pub struct HeaderSlice<H, T: ?Sized> { +pub struct HeaderSlice<H, T> { /// The fixed-sized data. pub header: H, + /// The length of the slice at our end. 
+ len: usize, + /// The dynamically-sized data. - pub slice: T, + data: [T; 0], +} + +impl<H: PartialEq, T: PartialEq> PartialEq for HeaderSlice<H, T> { + fn eq(&self, other: &Self) -> bool { + self.header == other.header && self.slice() == other.slice() + } +} + +impl<H, T> Drop for HeaderSlice<H, T> { + fn drop(&mut self) { + unsafe { + let mut ptr = self.data.as_mut_ptr(); + for _ in 0..self.len { + std::ptr::drop_in_place(ptr); + ptr = ptr.offset(1); + } + } + } +} + +impl<H, T> HeaderSlice<H, T> { + /// Returns the dynamically sized slice in this HeaderSlice. + #[inline(always)] + pub fn slice(&self) -> &[T] { + unsafe { std::slice::from_raw_parts(self.data(), self.len) } + } + + #[inline(always)] + fn data(&self) -> *const T { + std::ptr::addr_of!(self.data) as *const _ + } + + #[inline(always)] + fn data_mut(&mut self) -> *mut T { + std::ptr::addr_of_mut!(self.data) as *mut _ + } + + /// Returns the dynamically sized slice in this HeaderSlice. + #[inline(always)] + pub fn slice_mut(&mut self) -> &mut [T] { + unsafe { std::slice::from_raw_parts_mut(self.data_mut(), self.len) } + } + + /// Returns the len of the slice. + #[inline(always)] + pub fn len(&self) -> usize { + self.len + } } #[inline(always)] @@ -658,7 +716,7 @@ fn divide_rounding_up(dividend: usize, divisor: usize) -> usize { (dividend + divisor - 1) / divisor } -impl<H, T> Arc<HeaderSlice<H, [T]>> { +impl<H, T> Arc<HeaderSlice<H, T>> { /// Creates an Arc for a HeaderSlice using the given header struct and /// iterator to generate the slice. /// @@ -671,7 +729,7 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> { /// then `alloc` must return an allocation that can be dellocated /// by calling Box::from_raw::<ArcInner<HeaderSlice<H, T>>> on it. #[inline] - fn from_header_and_iter_alloc<F, I>( + pub fn from_header_and_iter_alloc<F, I>( alloc: F, header: H, mut items: I, @@ -684,31 +742,11 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> { { assert_ne!(size_of::<T>(), 0, "Need to think about ZST"); - let inner_align = align_of::<ArcInner<HeaderSlice<H, [T; 0]>>>(); + let size = size_of::<ArcInner<HeaderSlice<H, T>>>() + size_of::<T>() * num_items; + let inner_align = align_of::<ArcInner<HeaderSlice<H, T>>>(); debug_assert!(inner_align >= align_of::<T>()); - // Compute the required size for the allocation. - let size = { - // Next, synthesize a totally garbage (but properly aligned) pointer - // to a sequence of T. - let fake_slice_ptr = inner_align as *const T; - - // Convert that sequence to a fat pointer. The address component of - // the fat pointer will be garbage, but the length will be correct. - let fake_slice = unsafe { slice::from_raw_parts(fake_slice_ptr, num_items) }; - - // Pretend the garbage address points to our allocation target (with - // a trailing sequence of T), rather than just a sequence of T. - let fake_ptr = fake_slice as *const [T] as *const ArcInner<HeaderSlice<H, [T]>>; - let fake_ref: &ArcInner<HeaderSlice<H, [T]>> = unsafe { &*fake_ptr }; - - // Use size_of_val, which will combine static information about the - // type with the length from the fat pointer. The garbage address - // will not be used. - mem::size_of_val(fake_ref) - }; - - let ptr: *mut ArcInner<HeaderSlice<H, [T]>>; + let ptr: *mut ArcInner<HeaderSlice<H, T>>; unsafe { // Allocate the buffer. let layout = if inner_align <= align_of::<usize>() { @@ -723,15 +761,7 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> { }; let buffer = alloc(layout); - - // Synthesize the fat pointer. 
We do this by claiming we have a direct - // pointer to a [T], and then changing the type of the borrow. The key - // point here is that the length portion of the fat pointer applies - // only to the number of elements in the dynamically-sized portion of - // the type, so the value will be the same whether it points to a [T] - // or something else with a [T] as its last member. - let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items); - ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>; + ptr = buffer as *mut ArcInner<HeaderSlice<H, T>>; // Write the data. // @@ -744,8 +774,9 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> { }; ptr::write(&mut ((*ptr).count), count); ptr::write(&mut ((*ptr).data.header), header); + ptr::write(&mut ((*ptr).data.len), num_items); if num_items != 0 { - let mut current: *mut T = &mut (*ptr).data.slice[0]; + let mut current = std::ptr::addr_of_mut!((*ptr).data.data) as *mut T; for _ in 0..num_items { ptr::write( current, @@ -778,8 +809,8 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> { // Return the fat Arc. assert_eq!( size_of::<Self>(), - size_of::<usize>() * 2, - "The Arc will be fat" + size_of::<usize>(), + "The Arc should be thin" ); unsafe { Arc { @@ -839,208 +870,18 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> { } } -/// Header data with an inline length. Consumers that use HeaderWithLength as the -/// Header type in HeaderSlice can take advantage of ThinArc. -#[derive(Debug, Eq, PartialEq, PartialOrd)] -#[repr(C)] -pub struct HeaderWithLength<H> { - /// The fixed-sized data. - pub header: H, - - /// The slice length. - length: usize, -} - -impl<H> HeaderWithLength<H> { - /// Creates a new HeaderWithLength. - pub fn new(header: H, length: usize) -> Self { - HeaderWithLength { header, length } - } -} - -type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>; - -/// A "thin" `Arc` containing dynamically sized data -/// /// This is functionally equivalent to Arc<(H, [T])> /// -/// When you create an `Arc` containing a dynamically sized type -/// like `HeaderSlice<H, [T]>`, the `Arc` is represented on the stack -/// as a "fat pointer", where the length of the slice is stored -/// alongside the `Arc`'s pointer. In some situations you may wish to -/// have a thin pointer instead, perhaps for FFI compatibility -/// or space efficiency. -/// -/// Note that we use `[T; 0]` in order to have the right alignment for `T`. -/// -/// `ThinArc` solves this by storing the length in the allocation itself, -/// via `HeaderSliceWithLength`. -#[repr(C)] -pub struct ThinArc<H, T> { - ptr: ptr::NonNull<ArcInner<HeaderSliceWithLength<H, [T; 0]>>>, - phantom: PhantomData<(H, T)>, -} - -impl<H: fmt::Debug, T: fmt::Debug> fmt::Debug for ThinArc<H, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(self.deref(), f) - } -} - -unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {} -unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {} - -// Synthesize a fat pointer from a thin pointer. -// -// See the comment around the analogous operation in from_header_and_iter. 
-fn thin_to_thick<H, T>( - thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 0]>>, -) -> *mut ArcInner<HeaderSliceWithLength<H, [T]>> { - let len = unsafe { (*thin).data.header.length }; - let fake_slice: *mut [T] = unsafe { slice::from_raw_parts_mut(thin as *mut T, len) }; - - fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>> -} - -impl<H, T> ThinArc<H, T> { - /// Temporarily converts |self| into a bonafide Arc and exposes it to the - /// provided callback. The refcount is not modified. +/// When you create an `Arc` containing a dynamically sized type like a slice, the `Arc` is +/// represented on the stack as a "fat pointer", where the length of the slice is stored alongside +/// the `Arc`'s pointer. In some situations you may wish to have a thin pointer instead, perhaps +/// for FFI compatibility or space efficiency. `ThinArc` solves this by storing the length in the +/// allocation itself, via `HeaderSlice`. +pub type ThinArc<H, T> = Arc<HeaderSlice<H, T>>; + +impl<H, T> UniqueArc<HeaderSlice<H, T>> { #[inline] - pub fn with_arc<F, U>(&self, f: F) -> U - where - F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U, - { - // Synthesize transient Arc, which never touches the refcount of the ArcInner. - let transient = unsafe { - mem::ManuallyDrop::new(Arc { - p: ptr::NonNull::new_unchecked(thin_to_thick(self.ptr.as_ptr())), - phantom: PhantomData, - }) - }; - - // Expose the transient Arc to the callback, which may clone it if it wants. - let result = f(&transient); - - // Forward the result. - result - } - - /// Creates a `ThinArc` for a HeaderSlice using the given header struct and - /// iterator to generate the slice. pub fn from_header_and_iter<I>(header: H, items: I) -> Self - where - I: Iterator<Item = T> + ExactSizeIterator, - { - let header = HeaderWithLength::new(header, items.len()); - Arc::into_thin(Arc::from_header_and_iter(header, items)) - } - - /// Create a static `ThinArc` for a HeaderSlice using the given header - /// struct and iterator to generate the slice, placing it in the allocation - /// provided by the specified `alloc` function. - /// - /// `alloc` must return a pointer into a static allocation suitable for - /// storing data with the `Layout` passed into it. The pointer returned by - /// `alloc` will not be freed. - pub unsafe fn static_from_header_and_iter<F, I>(alloc: F, header: H, items: I) -> Self - where - F: FnOnce(Layout) -> *mut u8, - I: Iterator<Item = T> + ExactSizeIterator, - { - let len = items.len(); - let header = HeaderWithLength::new(header, len); - Arc::into_thin(Arc::from_header_and_iter_alloc( - alloc, header, items, len, /* is_static = */ true, - )) - } - - /// Returns the address on the heap of the ThinArc itself -- not the T - /// within it -- for memory reporting, and bindings. - #[inline] - pub fn ptr(&self) -> *const c_void { - self.ptr.as_ptr() as *const ArcInner<T> as *const c_void - } - - /// If this is a static ThinArc, this returns null. 
- #[inline] - pub fn heap_ptr(&self) -> *const c_void { - let is_static = - ThinArc::with_arc(self, |a| a.inner().count.load(Relaxed) == STATIC_REFCOUNT); - if is_static { - ptr::null() - } else { - self.ptr() - } - } -} - -impl<H, T> Deref for ThinArc<H, T> { - type Target = HeaderSliceWithLength<H, [T]>; - - #[inline] - fn deref(&self) -> &Self::Target { - unsafe { &(*thin_to_thick(self.ptr.as_ptr())).data } - } -} - -impl<H, T> Clone for ThinArc<H, T> { - #[inline] - fn clone(&self) -> Self { - ThinArc::with_arc(self, |a| Arc::into_thin(a.clone())) - } -} - -impl<H, T> Drop for ThinArc<H, T> { - #[inline] - fn drop(&mut self) { - let _ = Arc::from_thin(ThinArc { - ptr: self.ptr, - phantom: PhantomData, - }); - } -} - -impl<H, T> Arc<HeaderSliceWithLength<H, [T]>> { - /// Converts an `Arc` into a `ThinArc`. This consumes the `Arc`, so the refcount - /// is not modified. - #[inline] - pub fn into_thin(a: Self) -> ThinArc<H, T> { - assert_eq!( - a.header.length, - a.slice.len(), - "Length needs to be correct for ThinArc to work" - ); - let fat_ptr: *mut ArcInner<HeaderSliceWithLength<H, [T]>> = a.ptr(); - mem::forget(a); - let thin_ptr = fat_ptr as *mut [usize] as *mut usize; - ThinArc { - ptr: unsafe { - ptr::NonNull::new_unchecked( - thin_ptr as *mut ArcInner<HeaderSliceWithLength<H, [T; 0]>>, - ) - }, - phantom: PhantomData, - } - } - - /// Converts a `ThinArc` into an `Arc`. This consumes the `ThinArc`, so the refcount - /// is not modified. - #[inline] - pub fn from_thin(a: ThinArc<H, T>) -> Self { - let ptr = thin_to_thick(a.ptr.as_ptr()); - mem::forget(a); - unsafe { - Arc { - p: ptr::NonNull::new_unchecked(ptr), - phantom: PhantomData, - } - } - } -} - -impl<H, T> UniqueArc<HeaderSliceWithLength<H, [T]>> { - #[inline] - pub fn from_header_and_iter<I>(header: HeaderWithLength<H>, items: I) -> Self where I: Iterator<Item = T> + ExactSizeIterator, { @@ -1049,7 +890,7 @@ impl<H, T> UniqueArc<HeaderSliceWithLength<H, [T]>> { #[inline] pub fn from_header_and_iter_with_size<I>( - header: HeaderWithLength<H>, + header: H, items: I, num_items: usize, ) -> Self @@ -1064,29 +905,16 @@ impl<H, T> UniqueArc<HeaderSliceWithLength<H, [T]>> { /// Returns a mutable reference to the header. pub fn header_mut(&mut self) -> &mut H { // We know this to be uniquely owned - unsafe { &mut (*self.0.ptr()).data.header.header } + unsafe { &mut (*self.0.ptr()).data.header } } /// Returns a mutable reference to the slice. pub fn data_mut(&mut self) -> &mut [T] { // We know this to be uniquely owned - unsafe { &mut (*self.0.ptr()).data.slice } - } - - pub fn shareable_thin(self) -> ThinArc<H, T> { - Arc::into_thin(self.0) + unsafe { (*self.0.ptr()).data.slice_mut() } } } -impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> { - #[inline] - fn eq(&self, other: &ThinArc<H, T>) -> bool { - ThinArc::with_arc(self, |a| ThinArc::with_arc(other, |b| *a == *b)) - } -} - -impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {} - /// A "borrowed `Arc`". This is a pointer to /// a T that is known to have been allocated within an /// `Arc`. 
@@ -1307,7 +1135,7 @@ impl<A: fmt::Debug, B: fmt::Debug> fmt::Debug for ArcUnion<A, B> { #[cfg(test)] mod tests { - use super::{Arc, HeaderWithLength, ThinArc}; + use super::{Arc, ThinArc}; use std::clone::Clone; use std::ops::Drop; use std::sync::atomic; @@ -1326,13 +1154,9 @@ mod tests { #[test] fn empty_thin() { - let header = HeaderWithLength::new(100u32, 0); - let x = Arc::from_header_and_iter(header, std::iter::empty::<i32>()); - let y = Arc::into_thin(x.clone()); - assert_eq!(y.header.header, 100); - assert!(y.slice.is_empty()); - assert_eq!(x.header.header, 100); - assert!(x.slice.is_empty()); + let x = Arc::from_header_and_iter(100u32, std::iter::empty::<i32>()); + assert_eq!(x.header, 100); + assert!(x.slice().is_empty()); } #[test] @@ -1344,12 +1168,11 @@ mod tests { } // The header will have more alignment than `Padded` - let header = HeaderWithLength::new(0i32, 2); let items = vec![Padded { i: 0xdead }, Padded { i: 0xbeef }]; - let a = ThinArc::from_header_and_iter(header, items.into_iter()); - assert_eq!(a.slice.len(), 2); - assert_eq!(a.slice[0].i, 0xdead); - assert_eq!(a.slice[1].i, 0xbeef); + let a = ThinArc::from_header_and_iter(0i32, items.into_iter()); + assert_eq!(a.len(), 2); + assert_eq!(a.slice()[0].i, 0xdead); + assert_eq!(a.slice()[1].i, 0xbeef); } #[test] @@ -1357,13 +1180,10 @@ mod tests { let mut canary = atomic::AtomicUsize::new(0); let c = Canary(&mut canary as *mut atomic::AtomicUsize); let v = vec![5, 6]; - let header = HeaderWithLength::new(c, v.len()); { - let x = Arc::into_thin(Arc::from_header_and_iter(header, v.into_iter())); - let y = ThinArc::with_arc(&x, |q| q.clone()); - let _ = y.clone(); + let x = Arc::from_header_and_iter(c, v.into_iter()); + let _ = x.clone(); let _ = x == x; - Arc::from_thin(x.clone()); } assert_eq!(canary.load(Acquire), 1); } diff --git a/servo/components/style/values/computed/font.rs b/servo/components/style/values/computed/font.rs index 930417d905bc32796b52c2f0d1dbea73fb7f4c2d..725e527c0c000a12951f229c41c1c2470651ea8b 100644 --- a/servo/components/style/values/computed/font.rs +++ b/servo/components/style/values/computed/font.rs @@ -367,7 +367,7 @@ impl FontFamily { generic_font_family!(MOZ_EMOJI, MozEmoji); generic_font_family!(SYSTEM_UI, SystemUi); - match generic { + let family = match generic { GenericFontFamily::None => { debug_assert!(false, "Bogus caller!"); &*SERIF @@ -379,7 +379,9 @@ impl FontFamily { GenericFontFamily::Fantasy => &*FANTASY, GenericFontFamily::MozEmoji => &*MOZ_EMOJI, GenericFontFamily::SystemUi => &*SYSTEM_UI, - } + }; + debug_assert_eq!(*family.families.iter().next().unwrap(), SingleFontFamily::Generic(generic)); + family } } diff --git a/servo/components/style_traits/arc_slice.rs b/servo/components/style_traits/arc_slice.rs index 1f10ed9455cc87202ffd48700e48c0f297d01a96..43f31cb2833f8a47992a93244058147eb59d0fd3 100644 --- a/servo/components/style_traits/arc_slice.rs +++ b/servo/components/style_traits/arc_slice.rs @@ -24,9 +24,6 @@ use malloc_size_of::{MallocSizeOf, MallocSizeOfOps, MallocUnconditionalSizeOf}; const ARC_SLICE_CANARY: u64 = 0xf3f3f3f3f3f3f3f3; /// A wrapper type for a refcounted slice using ThinArc. 
-/// -/// cbindgen:derive-eq=false -/// cbindgen:derive-neq=false #[repr(C)] #[derive(Debug, Eq, PartialEq, ToShmem)] pub struct ArcSlice<T>(#[shmem(field_bound)] ThinArc<u64, T>); @@ -36,8 +33,8 @@ impl<T> Deref for ArcSlice<T> { #[inline] fn deref(&self) -> &Self::Target { - debug_assert_eq!(self.0.header.header, ARC_SLICE_CANARY); - &self.0.slice + debug_assert_eq!(self.0.header, ARC_SLICE_CANARY); + self.0.slice() } } @@ -111,9 +108,9 @@ impl<T> ArcSlice<T> { where I: Iterator<Item = T> + ExactSizeIterator, { - let thin_arc = ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items); - thin_arc.with_arc(|a| a.mark_as_intentionally_leaked()); - ArcSlice(thin_arc) + let arc = ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items); + arc.mark_as_intentionally_leaked(); + ArcSlice(arc) } /// Creates a value that can be passed via FFI, and forgets this value @@ -122,7 +119,7 @@ impl<T> ArcSlice<T> { #[allow(unsafe_code)] pub fn forget(self) -> ForgottenArcSlicePtr<T> { let ret = unsafe { - ForgottenArcSlicePtr(NonNull::new_unchecked(self.0.ptr() as *const _ as *mut _)) + ForgottenArcSlicePtr(NonNull::new_unchecked(self.0.raw_ptr() as *const _ as *mut _)) }; mem::forget(self); ret @@ -133,14 +130,14 @@ impl<T> ArcSlice<T> { #[inline] pub fn leaked_empty_ptr() -> *mut std::os::raw::c_void { let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone(); - let ptr = empty.0.ptr(); + let ptr = empty.0.raw_ptr(); std::mem::forget(empty); ptr as *mut _ } /// Returns whether there's only one reference to this ArcSlice. pub fn is_unique(&self) -> bool { - self.0.with_arc(|arc| arc.is_unique()) + self.0.is_unique() } } diff --git a/servo/components/to_shmem/lib.rs b/servo/components/to_shmem/lib.rs index 717209b7c65095a801bce0ccae7506e51936a4a6..1e70697165ee85823425cf18ffcbfddd5b8c2c2d 100644 --- a/servo/components/to_shmem/lib.rs +++ b/servo/components/to_shmem/lib.rs @@ -20,7 +20,7 @@ extern crate smallvec; extern crate string_cache; extern crate thin_vec; -use servo_arc::{Arc, ThinArc}; +use servo_arc::{Arc, HeaderSlice}; use smallbitvec::{InternalStorage, SmallBitVec}; use smallvec::{Array, SmallVec}; use std::alloc::Layout; @@ -463,7 +463,7 @@ impl<T: ToShmem> ToShmem for Arc<T> { } } -impl<H: ToShmem, T: ToShmem> ToShmem for ThinArc<H, T> { +impl<H: ToShmem, T: ToShmem> ToShmem for Arc<HeaderSlice<H, T>> { fn to_shmem(&self, builder: &mut SharedMemoryBuilder) -> Result<Self> { // We don't currently have any shared ThinArc values in stylesheets, // so don't support them for now. @@ -476,26 +476,27 @@ impl<H: ToShmem, T: ToShmem> ToShmem for ThinArc<H, T> { // Make a clone of the Arc-owned header and slice values with all of // their heap allocations placed in the shared memory buffer. - let header = self.header.header.to_shmem(builder)?; - let mut values = Vec::with_capacity(self.slice.len()); - for v in self.slice.iter() { + let header = self.header.to_shmem(builder)?; + let mut values = Vec::with_capacity(self.len()); + for v in self.slice().iter() { values.push(v.to_shmem(builder)?); } // Create a new ThinArc with the shared value and have it place // its ArcInner in the shared memory buffer. 
- unsafe { - let static_arc = ThinArc::static_from_header_and_iter( - |layout| builder.alloc(layout), - ManuallyDrop::into_inner(header), - values.into_iter().map(ManuallyDrop::into_inner), - ); + let len = values.len(); + let static_arc = Self::from_header_and_iter_alloc( + |layout| builder.alloc(layout), + ManuallyDrop::into_inner(header), + values.into_iter().map(ManuallyDrop::into_inner), + len, + /* is_static = */ true, + ); - #[cfg(debug_assertions)] - builder.shared_values.insert(self.heap_ptr()); + #[cfg(debug_assertions)] + builder.shared_values.insert(self.heap_ptr()); - Ok(ManuallyDrop::new(static_arc)) - } + Ok(ManuallyDrop::new(static_arc)) } } diff --git a/servo/ports/geckolib/cbindgen.toml b/servo/ports/geckolib/cbindgen.toml index 69bcfaf612928ed5b5b207cf3da079180fe35564..e1990b20caa36229fc0341db289264b920d72472 100644 --- a/servo/ports/geckolib/cbindgen.toml +++ b/servo/ports/geckolib/cbindgen.toml @@ -638,23 +638,25 @@ renaming_overrides_prefixing = true [[nodiscard]] inline bool DecrementRef(); """ -"ArcSlice" = """ - inline StyleArcSlice(); - inline StyleArcSlice(const StyleArcSlice& aOther); +"HeaderSlice" = """ + StyleHeaderSlice() = delete; + StyleHeaderSlice(const StyleHeaderSlice&) = delete; + + inline ~StyleHeaderSlice(); + inline bool operator==(const StyleHeaderSlice& other) const; + inline bool operator!=(const StyleHeaderSlice& other) const; - inline StyleArcSlice& operator=(const StyleArcSlice&); - inline StyleArcSlice& operator=(StyleArcSlice&&); + inline Span<const T> AsSpan() const; + inline size_t Length() const { return len; } + inline bool IsEmpty() const { return len == 0; } +""" +"ArcSlice" = """ + inline StyleArcSlice(); inline explicit StyleArcSlice(const StyleForgottenArcSlicePtr<T>& aPtr); - private: - inline void Release(); - public: - inline ~StyleArcSlice(); inline Span<const T> AsSpan() const; inline size_t Length() const; inline bool IsEmpty() const; - inline bool operator==(const StyleArcSlice& other) const; - inline bool operator!=(const StyleArcSlice& other) const; """ "Arc" = """ @@ -663,6 +665,9 @@ renaming_overrides_prefixing = true private: inline void Release(); public: + explicit StyleArc(decltype(p) aP) : p(aP) { + MOZ_DIAGNOSTIC_ASSERT(p, "Arc shouldn't be null"); + } inline ~StyleArc(); inline StyleArc& operator=(const StyleArc&);
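
Reviewer note (illustration, not part of the patch): the heart of this change is dropping the fat-pointer `ThinArc`/`HeaderSliceWithLength` machinery in favor of a plain `Arc<HeaderSlice<H, T>>`, where the slice length and the elements live inside the allocation right after the header, so the handle itself stays one word wide. That is why the `size_of::<Self>()` assertion flips from "The Arc will be fat" to "The Arc should be thin", and why both `HeaderSlice::drop` and the C++ `StyleHeaderSlice` destructor have to destroy the trailing elements by hand over `Span(data, len)`. The standalone sketch below mirrors that layout using only std; the names `ThinBox`/`HeaderSlice` and the constructor/drop helpers here are illustrative assumptions, not the patched servo_arc API, and it omits refcounting, the static-allocation path, and the cbindgen-facing details.

```rust
use std::alloc::{alloc, dealloc, Layout};
use std::marker::PhantomData;
use std::ptr::{self, NonNull};

// Fixed-size header, inline length, then the trailing elements. `[T; 0]`
// contributes no size but forces the right alignment for the trailing `T`s.
#[repr(C)]
struct HeaderSlice<H, T> {
    header: H,
    len: usize,
    data: [T; 0],
}

// A one-word handle to a heap-allocated HeaderSlice plus `len` trailing `T`s.
struct ThinBox<H, T> {
    ptr: NonNull<HeaderSlice<H, T>>,
    _marker: PhantomData<(H, T)>,
}

impl<H, T> ThinBox<H, T> {
    fn layout(len: usize) -> Layout {
        // Prefix (header + len + padding) followed by storage for `len` `T`s.
        Layout::new::<HeaderSlice<H, T>>()
            .extend(Layout::array::<T>(len).unwrap())
            .unwrap()
            .0
            .pad_to_align()
    }

    fn from_header_and_iter<I>(header: H, items: I) -> Self
    where
        I: ExactSizeIterator<Item = T>,
    {
        let len = items.len();
        unsafe {
            let raw = alloc(Self::layout(len)) as *mut HeaderSlice<H, T>;
            assert!(!raw.is_null(), "allocation failed");
            // Write the header and the length, then the elements in place.
            ptr::write(ptr::addr_of_mut!((*raw).header), header);
            ptr::write(ptr::addr_of_mut!((*raw).len), len);
            let mut cur = ptr::addr_of_mut!((*raw).data) as *mut T;
            for item in items {
                ptr::write(cur, item);
                cur = cur.add(1);
            }
            ThinBox { ptr: NonNull::new_unchecked(raw), _marker: PhantomData }
        }
    }

    fn header(&self) -> &H {
        unsafe { &(*self.ptr.as_ptr()).header }
    }

    fn slice(&self) -> &[T] {
        // Reconstruct the slice from the trailing storage and the inline len.
        unsafe {
            let inner = self.ptr.as_ptr();
            let data = ptr::addr_of!((*inner).data) as *const T;
            std::slice::from_raw_parts(data, (*inner).len)
        }
    }
}

impl<H, T> Drop for ThinBox<H, T> {
    fn drop(&mut self) {
        unsafe {
            let inner = self.ptr.as_ptr();
            let len = (*inner).len;
            // The compiler only sees `[T; 0]`, so the trailing elements must be
            // dropped by hand before the allocation is freed.
            let mut cur = ptr::addr_of_mut!((*inner).data) as *mut T;
            for _ in 0..len {
                ptr::drop_in_place(cur);
                cur = cur.add(1);
            }
            ptr::drop_in_place(ptr::addr_of_mut!((*inner).header));
            dealloc(inner as *mut u8, Self::layout(len));
        }
    }
}

fn main() {
    let b = ThinBox::from_header_and_iter(0xf3f3_f3f3_f3f3_f3f3u64, vec![1i32, 2, 3].into_iter());
    // The handle is pointer-sized ("thin"), unlike an Arc<(H, [T])> fat pointer.
    assert_eq!(std::mem::size_of_val(&b), std::mem::size_of::<usize>());
    assert_eq!(*b.header(), 0xf3f3_f3f3_f3f3_f3f3);
    assert_eq!(b.slice(), &[1, 2, 3]);
}
```

The same single-allocation reasoning is why `from_header_and_iter_alloc` in the patch writes `len` into the allocation before the elements, and why the callers can switch from `_0.ptr->data.header.length` / `self.0.header.header` to the simpler `_0->Length()` / `self.0.header` accessors.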