Bonita Montero
2024-08-19 14:22:50 UTC
Reply
Permalink
The idea is that the pointer inside this atomic has two counters which come
along in a size_t'd word. The first counter has a width of 24 bits and
counts the number of control-block increments which are in progress, so I'm
also using double reference counting. The remaining 40 bits of the second
word contain a counter which constantly increments when the pointer is
being overwritten. This prevents the lower 24 bits from being decremented
if the pointer has already been overwritten.
The code is somewhat faster than the futex'd solution of MSVC and
magnitudes faster than clang's libc++ atomic<shared_ptr<>>.
Here's the code:
#pragma once
#include <cstdint>
#include <atomic>
#include <utility>
#include <cassert>
#include <stdexcept>
#include <type_traits>
#include "dcas_atomic.h"
// Forward declarations: shared_obj is the plain (non-atomic) handle,
// tshared_obj the atomic holder, and make_shared_obj the factory that
// creates a non-empty handle.
template<typename T>
struct shared_obj;
template<typename T, typename ... Args>
shared_obj<T> make_shared_obj( Args &&... args );
template<typename T>
struct tshared_obj;
// Thrown by tshared_obj::copy() when the 24-bit updater counter is
// saturated and no further in-progress load can be announced.
struct dca_update_exception : std::invalid_argument
{
    dca_update_exception() : std::invalid_argument( "" ) {}
};
// Non-atomic shared-ownership handle, similar in spirit to std::shared_ptr
// but with the object and its reference counter allocated together in one
// control block (see make_shared_obj).
template<typename T>
struct shared_obj
{
// Creates an empty handle.
shared_obj() noexcept;
shared_obj( shared_obj && ) noexcept;
shared_obj( shared_obj const & ) noexcept;
shared_obj &operator =( shared_obj && ) noexcept;
shared_obj &operator =( shared_obj const & ) noexcept;
~shared_obj();
// Raw pointer to the managed object.
T *get() const noexcept;
// True when the handle owns an object.
operator bool() const noexcept;
// Snapshot of the reference count; 0 for an empty handle.
size_t use_count() const noexcept;
T &operator *() const noexcept;
T *operator ->() const noexcept
requires std::is_class_v<T>;
private:
template<typename T2, typename ... Args>
friend shared_obj<T2> make_shared_obj( Args &&... args );
template<typename T2>
friend struct tshared_obj;
// Control block: the payload and its atomic reference counter share one
// allocation.
struct ctrl_t
{
template<typename ... Args>
ctrl_t( Args &&... args );
T m_obj;
std::atomic<size_t> m_refCount;
};
// Adopts an already-counted control block (used by the factory and by
// tshared_obj).
shared_obj( ctrl_t *obj );
ctrl_t *m_ctrl;
};
template<typename T>
inline shared_obj<T>::shared_obj() noexcept :
m_ctrl( nullptr )
{
}
template<typename T>
inline shared_obj<T>::shared_obj( shared_obj &&other ) noexcept :
m_ctrl( other.m_ctrl )
{
other.m_ctrl = nullptr;
}
// Copy constructor: shares ownership with other.
// Fix: the original unconditionally dereferenced other.m_ctrl, crashing
// when copying an empty handle.
template<typename T>
inline shared_obj<T>::shared_obj( shared_obj const &other ) noexcept :
    m_ctrl( other.m_ctrl )
{
    if( m_ctrl )
    {
        // relaxed is sufficient for a pure increment; ordering is provided
        // by the decrement that eventually releases the object
        size_t before = m_ctrl->m_refCount.fetch_add( 1,
            std::memory_order_relaxed );
        assert(before != (size_t)-1);
    }
}
template<typename T>
template<typename ... Args>
inline shared_obj<T>::ctrl_t::ctrl_t( Args &&... args ) :
m_obj( std::forward<Args>( args ) ... ),
m_refCount( 1 )
{
}
// Move assignment: releases the current object and steals other's.
// Fixes vs. original:
//  - `other = nullptr` constructed a temporary via the private ctr_t*
//    constructor and recursed into this operator; it must clear other.m_ctrl.
//  - the function never returned *this (undefined behavior).
//  - self-move is now a no-op.
//  - the decrement uses acq_rel: release publishes our writes, acquire makes
//    other threads' writes visible to the thread that deletes.
template<typename T>
shared_obj<T> &shared_obj<T>::operator =( shared_obj &&other ) noexcept
{
    if( this != &other )
    {
        if( m_ctrl && m_ctrl->m_refCount.fetch_sub( 1,
            std::memory_order_acq_rel ) == 1 )
            delete m_ctrl;
        m_ctrl = other.m_ctrl;
        other.m_ctrl = nullptr;
    }
    return *this;
}
// Copy assignment: shares ownership with other, releasing the old object.
// Fixes vs. original:
//  - `m_ctrl->fetch_add(...)` did not compile (the counter is m_refCount).
//  - the explicit `this->~shared_obj()` destroyed first, so self-assignment
//    could delete the object before re-referencing it; increment now happens
//    before the decrement, which also makes self-assignment safe.
//  - the function never returned *this (undefined behavior).
//  - null source handles are handled.
template<typename T>
shared_obj<T> &shared_obj<T>::operator =( shared_obj const &other ) noexcept
{
    ctrl_t *newCtrl = other.m_ctrl;
    if( newCtrl )
    {
        size_t before = newCtrl->m_refCount.fetch_add( 1,
            std::memory_order_relaxed );
        assert(before != (size_t)-1);
    }
    if( m_ctrl && m_ctrl->m_refCount.fetch_sub( 1,
        std::memory_order_acq_rel ) == 1 )
        delete m_ctrl;
    m_ctrl = newCtrl;
    return *this;
}
// Destructor: drops one reference and deletes the control block when it was
// the last. Fix: the original used memory_order_release only; the thread
// that performs the delete also needs the acquire side to observe all
// writes made by other owners, hence acq_rel.
template<typename T>
shared_obj<T>::~shared_obj()
{
    if( m_ctrl && m_ctrl->m_refCount.fetch_sub( 1,
        std::memory_order_acq_rel ) == 1 )
        delete m_ctrl;
}
// Raw pointer to the managed object, or nullptr for an empty handle.
// Fix: the original computed &m_ctrl->m_obj even when m_ctrl was null.
template<typename T>
inline T *shared_obj<T>::get() const noexcept
{
    return m_ctrl ? &m_ctrl->m_obj : nullptr;
}
// True when the handle currently owns an object.
template<typename T>
inline shared_obj<T>::operator bool() const noexcept
{
    return m_ctrl != nullptr;
}
// Snapshot of the reference count (0 for an empty handle). The value may be
// stale the moment it is returned.
template<typename T>
inline size_t shared_obj<T>::use_count() const noexcept
{
    if( !m_ctrl ) [[unlikely]]
        return 0;
    return m_ctrl->m_refCount.load( std::memory_order_relaxed );
}
template<typename T>
inline shared_obj<T>::shared_obj( ctrl_t *obj ) :
m_ctrl( obj )
{
}
// Dereference; the handle must not be empty.
template<typename T>
inline T &shared_obj<T>::operator *() const noexcept
{
    assert(m_ctrl != nullptr);
    return m_ctrl->m_obj;
}
// Member access; the handle must not be empty.
// Fix: the original returned m_ctrl->m_obj (a T) from a function whose
// return type is T* — this does not compile; the address is needed.
template<typename T>
inline T *shared_obj<T>::operator ->() const noexcept
    requires std::is_class_v<T>
{
    assert(m_ctrl);
    return &m_ctrl->m_obj;
}
// Factory: allocates one control block holding the object plus its counter
// and wraps it in a handle with use_count() == 1.
template<typename T, typename ... Args>
shared_obj<T> make_shared_obj( Args &&... args )
{
    using ctrl = typename shared_obj<T>::ctrl_t;
    return shared_obj<T>( new ctrl( std::forward<Args>( args ) ... ) );
}
// Atomic holder for a shared_obj, meant as a faster replacement for
// std::atomic<std::shared_ptr<T>>. The DW-CAS word pairs the raw control
// block pointer with a second word split into a 24-bit "updater" counter
// (loads in progress) and a 40-bit sequence counter bumped on every
// pointer replacement.
template<typename T>
struct tshared_obj
{
tshared_obj();
tshared_obj( tshared_obj && ) noexcept;
tshared_obj( tshared_obj const & ) noexcept;
tshared_obj( shared_obj<T> && ) noexcept;
tshared_obj( shared_obj<T> const & ) noexcept;
~tshared_obj();
tshared_obj &operator =( tshared_obj && ) noexcept;
// NOTE(review): copy-assignment takes a non-const reference — confirm
// whether const was intended.
tshared_obj &operator =( tshared_obj & );
tshared_obj &operator =( shared_obj<T> && ) noexcept;
tshared_obj &operator =( shared_obj<T> const & ) noexcept;
// May throw dca_update_exception when the updater counter is saturated.
operator shared_obj<T>();
private:
using ctrl_t = shared_obj<T>::ctrl_t;
using pair_t = dcas_atomic::pair_t;
static_assert(sizeof(size_t) == 8, "");
// NOTE(review): SEQENCE_BITS is misspelled (SEQUENCE_BITS) and unused here.
static constexpr unsigned
UPDATER_BITS = 24,
SEQENCE_BITS = 64 - UPDATER_BITS;
static constexpr uint64_t
UPDATER_VALUE = 1,
SEQUENCE_VALUE = 1ull << UPDATER_BITS,
UPDATER_MASK = SEQUENCE_VALUE - 1,
SEQUENCE_MASK = ~UPDATER_MASK;
// first = ctrl_t pointer, second = updater bits | sequence bits.
mutable dcas_atomic m_dca;
// Counted load of the current pointer; may throw dca_update_exception.
ctrl_t *copy();
// Detaches the pointer together with its reference count.
ctrl_t *remove() noexcept;
// Replaces the pointer, taking ownership of obj's reference.
void store( ctrl_t *obj ) noexcept;
};
template<typename T>
inline tshared_obj<T>::tshared_obj() :
m_dca( pair_t( 0, 0 ) )
{
}
// Destructor: drops the holder's reference and deletes the control block
// when it was the last. No loads may be pending at destruction time.
// Fix: the decrement now uses acq_rel so the deleting thread observes all
// writes made by other (former) owners; release alone lacked the acquire
// side needed before the delete.
template<typename T>
tshared_obj<T>::~tshared_obj()
{
    ctrl_t *obj = (ctrl_t *)m_dca.first();
    if( !obj )
        return;
    assert(!(m_dca.second() & UPDATER_MASK));
    size_t before = obj->m_refCount.fetch_sub( 1, std::memory_order_acq_rel );
    assert(before > 0);
    if( before == 1 )
        delete obj;
}
// Takes over the reference held by the moved-from shared_obj; its counter
// is not touched because ownership is transferred, not shared.
template<typename T>
inline tshared_obj<T>::tshared_obj( shared_obj<T> &&other ) noexcept :
    m_dca( pair_t( (size_t)std::exchange( other.m_ctrl, nullptr ), 0 ) )
{
}
// Shares ownership with an existing shared_obj.
// Fixes vs. original: the source may be empty (no unconditional deref),
// and the increment uses relaxed ordering like every other increment in
// this file — release on a pure increment bought nothing.
template<typename T>
inline tshared_obj<T>::tshared_obj( shared_obj<T> const &other ) noexcept :
    m_dca( pair_t( (size_t)other.m_ctrl, 0 ) )
{
    if( other.m_ctrl )
        other.m_ctrl->m_refCount.fetch_add( 1, std::memory_order_relaxed );
}
template<typename T>
tshared_obj<T>::tshared_obj( tshared_obj &&other ) noexcept :
m_dca( pair_t( (size_t)other.remove(), 0 ) )
{
}
// Copy constructor: takes a counted copy of the source's pointer.
// NOTE(review): copy() can throw dca_update_exception when the 24-bit
// updater counter is saturated, but this constructor is noexcept — a throw
// would call std::terminate. Confirm this is intended.
template<typename T>
tshared_obj<T>::tshared_obj( tshared_obj const &other ) noexcept :
m_dca( pair_t( (size_t)other.copy(), 0 ) )
{
}
// Assignment from an rvalue shared_obj: its reference is transferred into
// the holder, the previously stored pointer is released by store().
template<typename T>
tshared_obj<T> &tshared_obj<T>::operator =( shared_obj<T> &&shared )
    noexcept
{
    store( std::exchange( shared.m_ctrl, nullptr ) );
    return *this;
}
// Assignment from an lvalue shared_obj: adds a reference for the holder,
// then replaces the stored pointer.
// Fix: the original unconditionally dereferenced shared.m_ctrl and crashed
// when assigning from an empty handle.
template<typename T>
tshared_obj<T> &tshared_obj<T>::operator =( shared_obj<T> const &shared
    ) noexcept
{
    ctrl_t *obj = shared.m_ctrl;
    if( obj )
    {
        size_t before = obj->m_refCount.fetch_add( 1,
            std::memory_order_relaxed );
        assert(before != (size_t)-1);
    }
    store( obj );
    return *this;
}
// Move assignment: detaches the source's pointer (with its count) and
// stores it here; store() releases the previously held pointer.
template<typename T>
tshared_obj<T> &tshared_obj<T>::operator =( tshared_obj &&other ) noexcept
{
    ctrl_t *taken = other.remove();
    store( taken );
    return *this;
}
// Copy assignment: takes a counted copy from other and stores it.
// Fix: self-assignment leaked one reference — copy() incremented the
// counter, then store() saw the identical pointer and returned without
// ever releasing that extra count. Self-assignment is now a no-op.
template<typename T>
tshared_obj<T> &tshared_obj<T>::operator =( tshared_obj &other )
{
    if( this != &other )
        store( other.copy() );
    return *this;
}
// Materializes a shared_obj sharing ownership with this holder.
// May throw dca_update_exception (propagated from copy()).
template<typename T>
tshared_obj<T>::operator shared_obj<T>()
{
    ctrl_t *obj = copy();
    return shared_obj<T>( obj );
}
// Copies the content of an atomic shared object to a pointer.
// Proceeds in three steps:
// 1. Increment the update counter.
// 2. Increment the reference counter.
// 3. Decrement the update counter if the pointer or sequence counter
//    hasn't changed.
// Counted load of the stored pointer (nullptr when empty).
// Step 1: CAS-increment the 24-bit updater counter to announce a load in
// progress; throws dca_update_exception when it is saturated.
// Step 2: increment the control block's reference counter — the pending
// updater bit keeps the block alive meanwhile.
// Step 3: CAS-decrement the updater counter, but only while both the
// pointer and the 40-bit sequence counter are unchanged; if a writer
// replaced the pointer in between, the loop breaks and the bit is left
// for the writer's bookkeeping.
template<typename T>
inline typename tshared_obj<T>::ctrl_t *tshared_obj<T>::copy()
{
pair_t expected( m_dca.first(), m_dca.second() ), desired;
do
if( !expected.first ) [[unlikely]]
return nullptr;
else if( (expected.second & UPDATER_MASK) != UPDATER_MASK ) [[likely]]
desired = { expected.first, expected.second + UPDATER_VALUE };
else
throw dca_update_exception();
while( !m_dca.compare_exchange( expected, desired, dcas_relaxed() ) );
ctrl_t *obj = (ctrl_t *)desired.first;
// refcount must already be > 0: the stored pointer itself holds a count
size_t rcBefore = obj->m_refCount.fetch_add( 1,
std::memory_order_relaxed );
assert(rcBefore > 0 && rcBefore != (size_t)-1);
expected = desired;
uint64_t seq = expected.second >> UPDATER_BITS;
do
if( expected.first == desired.first && expected.second >> UPDATER_BITS
== seq ) [[likely]]
desired.second = expected.second - UPDATER_VALUE;
else
break;
while( !m_dca.compare_exchange( expected, desired, dcas_relaxed() ) );
return obj;
}
// Removes the atomic pointer, preparing it to be moved.
// The returned control block still carries the reference count the holder
// owned; nullptr when the holder was empty. The sequence counter is bumped
// and the updater bits are cleared (note: `+` binds tighter than `&`, so
// this is (second + SEQUENCE_VALUE) & SEQUENCE_MASK).
template<typename T>
inline typename tshared_obj<T>::ctrl_t *tshared_obj<T>::remove() noexcept
{
pair_t expected( m_dca.first(), m_dca.second() ), desired;
desired.first = 0;
do
{
if( !expected.first )
return nullptr;
desired.second = expected.second + SEQUENCE_VALUE & SEQUENCE_MASK;
} while( !m_dca.compare_exchange( expected, desired, dcas_relaxed() ) );
return (ctrl_t *)expected.first;
}
// Replaces the atomic pointer and deletes the old one if there aren't
// pending loads.
// Replaces the stored pointer, taking ownership of the reference count the
// caller transfers with obj, and releases the previously stored pointer.
// Fixes vs. original:
//  - the new second word was computed from expected.first (the POINTER)
//    instead of expected.second — compare remove(); this corrupted the
//    sequence/updater word on every store.
//  - when the same pointer was already stored, the early return leaked the
//    reference the caller transferred; it is released now.
//  - the old object's decrement uses acq_rel so the deleting thread sees
//    all writes of other former owners.
template<typename T>
inline void tshared_obj<T>::store( ctrl_t *obj ) noexcept
{
    pair_t expected( m_dca.first(), m_dca.second() ), desired;
    desired.first = (size_t)obj;
    do
        if( expected.first != desired.first )
            desired.second = (expected.second + SEQUENCE_VALUE) & SEQUENCE_MASK;
        else
        {
            // same pointer already stored: drop the transferred reference
            if( obj && obj->m_refCount.fetch_sub( 1,
                std::memory_order_acq_rel ) == 1 )
                delete obj;
            return;
        }
    while( !m_dca.compare_exchange( expected, desired, dcas_release() ) );
    ctrl_t *oldObj = (ctrl_t *)expected.first;
    if( !oldObj ) [[unlikely]]
        return;
    size_t before = oldObj->m_refCount.fetch_sub( 1,
        std::memory_order_acq_rel );
    assert(before);
    // with pending loads the delete is left to the loader's bookkeeping
    if( before == 1 && !(expected.second & UPDATER_MASK) ) [[unlikely]]
        delete oldObj;
}
Here's the DW-CAS class used by this code:
#pragma once
#include <cstdint>
#include <utility>
#include <atomic>
#include <type_traits>
#include <cstddef>
#include <bit>
#include <memory>
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 26495)
#endif
#if defined(__llvm__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Watomic-alignment"
#endif
// Memory-order selectors for dcas_atomic, passed as tag types so the
// order is fixed at compile time (mirrors std::memory_order_*).
constexpr int DCAS_SEQ = 0;
constexpr int DCAS_ACQUIRE = 1;
constexpr int DCAS_RELAXED = 2;
constexpr int DCAS_RELEASE = 3;
template<int Type>
using dcas_type = std::integral_constant<int, Type>;
using dcas_seq = dcas_type<DCAS_SEQ>;
using dcas_acquire = dcas_type<DCAS_ACQUIRE>;
using dcas_relaxed = dcas_type<DCAS_RELAXED>;
using dcas_release = dcas_type<DCAS_RELEASE>;
// Lock-free double-width atomic: two size_t words that are always updated
// together with one double-width CAS (CMPXCHG16B / __atomic builtins /
// std::atomic<pair_t> when that is usable directly).
// Fix: the paste had split the first static_assert's string literal across
// two lines, leaving an unterminated string — rejoined here.
struct dcas_atomic
{
    static_assert(sizeof(size_t) == 8 || sizeof(size_t) == 4,
        "must be 64 or 32 bit architecture");
    // The two words managed as one atomic unit.
    struct pair_t { size_t first, second; };
    dcas_atomic() = default;
    dcas_atomic( pair_t const &desired );
    // Relaxed single-word reads.
    size_t first() const noexcept;
    size_t second() const noexcept;
    // presumably non-atomic reads ("_na") — definitions not in this file
    size_t first_na() const noexcept;
    size_t second_na() const noexcept;
    operator pair_t() const noexcept;
    template<int Type>
    bool compare_exchange( pair_t &expected, pair_t const &desired,
        dcas_type<Type> = dcas_relaxed() ) noexcept;
    template<int Type>
    pair_t load( dcas_type<Type> = dcas_relaxed() ) const noexcept;
    template<int Type>
    void store( pair_t const &niu, dcas_type<Type> = dcas_relaxed() );
private:
#if defined(__GNUC__) || defined(__clang__)
    using dword_t = std::conditional_t<sizeof(size_t) == 8,
        unsigned __int128, unsigned long long>;
#endif
    // std::atomic<pair_t> is used directly when it is lock-free or at least
    // adds no extra state (same size as a plain pair_t).
    static constexpr bool SUBSTITITABLE =
        std::atomic<pair_t>::is_always_lock_free || sizeof(std::atomic<pair_t>)
        == sizeof(pair_t);
    // All representations alias the same double-width storage.
    union alignas(2 * sizeof(size_t)) U
    {
        U() {}
        static_assert(sizeof(std::atomic<size_t>) == sizeof(size_t),
            "sizeof(atomic<size_t>) must be == sizeof(size_t)");
        std::atomic<size_t> m_atomics[2];
        std::conditional_t<SUBSTITITABLE, std::atomic<pair_t>, char> m_subsitute;
#if defined(_MSC_VER)
#if defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM64EC)
        __int64 volatile m_firstAndSecond[2];
#elif defined(_M_IX86)
        __int64 volatile m_firstAndSecond;
#else
    #error unknown architecture
#endif
#elif defined(__GNUC__) || defined(__clang__)
        dword_t volatile m_firstAndSecond;
#endif
    } u;
};
// Constructor: plain (unordered) initialization of both words — no other
// thread can see the object yet.
inline dcas_atomic::dcas_atomic( pair_t const &desired )
{
    u.m_atomics[1].store( desired.second, std::memory_order_relaxed );
    u.m_atomics[0].store( desired.first, std::memory_order_relaxed );
}
// Relaxed read of the first word only (not a consistent pair snapshot).
inline size_t dcas_atomic::first() const noexcept
{
    auto const &word = u.m_atomics[0];
    return word.load( std::memory_order_relaxed );
}
// Relaxed read of the second word only (not a consistent pair snapshot).
inline size_t dcas_atomic::second() const noexcept
{
    auto const &word = u.m_atomics[1];
    return word.load( std::memory_order_relaxed );
}
// Convenience snapshot; the two words are read independently, so the pair
// is not guaranteed to be consistent under concurrent writes.
inline dcas_atomic::operator pair_t() const noexcept
{
    pair_t snapshot{ first(), second() };
    return snapshot;
}
// Double-width compare-exchange with the compile-time-selected order.
// On success the storage holds desired and true is returned; on failure
// the observed value is written back into expected and false is returned
// (failure order is always relaxed). Like compare_exchange_weak, this may
// fail spuriously.
template<int Type>
inline bool dcas_atomic::compare_exchange( pair_t &expected, pair_t
const &desired, dcas_type<Type> ) noexcept
{
using namespace std;
static_assert(Type == DCAS_SEQ || Type == DCAS_ACQUIRE || Type ==
DCAS_RELAXED || Type == DCAS_RELEASE);
if constexpr( !SUBSTITITABLE )
{
#if defined(_MSC_VER)
#if defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM64EC)
// MSVC 64 bit: _InterlockedCompareExchange128 operates on __int64[2].
__int64 expectedA[2];
expectedA[0] = (__int64)expected.first;
expectedA[1] = (__int64)expected.second;
char fRet;
#if defined(_M_X64)
// x86-64 offers no ordering variants of this intrinsic.
fRet = _InterlockedCompareExchange128( u.m_firstAndSecond,
desired.second, desired.first, expectedA );
#else
// ARM64: pick the intrinsic variant matching the requested order.
if constexpr( Type == DCAS_SEQ )
fRet = _InterlockedCompareExchange128( u.m_firstAndSecond,
desired.second, desired.first, expectedA );
else if constexpr( Type == DCAS_ACQUIRE )
fRet = _InterlockedCompareExchange128_acq( u.m_firstAndSecond,
desired.second, desired.first, expectedA );
else if constexpr( Type == DCAS_RELAXED )
fRet = _InterlockedCompareExchange128_nf( u.m_firstAndSecond,
desired.second, desired.first, expectedA );
else
fRet = _InterlockedCompareExchange128_rel( u.m_firstAndSecond,
desired.second, desired.first, expectedA );
#endif
if( fRet )
return true;
// failed: report the value actually observed
expected.first = expectedA[0];
expected.second = expectedA[1];
return false;
#elif defined(_M_IX86)
// 32 bit x86: pack both words into one 64 bit value (CMPXCHG8B).
auto compose = []( pair_t const &p ) -> uint64_t { return p.first |
(uint64_t)p.second << 32; };
uint64_t
cDesired = compose( desired ),
cExpected = compose( expected ),
cResult = _InterlockedCompareExchange64( &u.m_firstAndSecond,
cDesired, cExpected );
if( cResult == cExpected ) [[likely]]
return true;
expected.first = (uint32_t)cResult;
expected.second = (uint32_t)(cResult >> 32);
return false;
#else
#error unspupported Windows-platform
#endif
#elif defined(__GNUC__) || defined(__clang__)
// gcc/clang: compose the pair into one double-width integer. The member
// pointers honour endianness so .first is always the low half.
constexpr auto
pair_t::*FIRST = std::endian::native == std::endian::little ?
&pair_t::first : &pair_t::second,
pair_t::*SECOND = std::endian::native == std::endian::little ?
&pair_t::second : &pair_t::first;
constexpr unsigned SZT_BITS = sizeof(size_t) * 8;
auto compose = []( pair_t const &p ) -> dword_t { return
(dword_t)(p.*FIRST) | (dword_t)(p.*SECOND) << SZT_BITS; };
dword_t
dwExpected = compose( expected ),
dwDesired = compose( desired );
bool ret;
if constexpr( Type == DCAS_SEQ )
ret = __atomic_compare_exchange_n( &u.m_firstAndSecond, &dwExpected,
dwDesired, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED );
else if constexpr( Type == DCAS_ACQUIRE )
ret = __atomic_compare_exchange_n( &u.m_firstAndSecond, &dwExpected,
dwDesired, true, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
else if constexpr( Type == DCAS_RELAXED )
ret = __atomic_compare_exchange_n( &u.m_firstAndSecond, &dwExpected,
dwDesired, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
else
ret = __atomic_compare_exchange_n( &u.m_firstAndSecond, &dwExpected,
dwDesired, true, __ATOMIC_RELEASE, __ATOMIC_RELAXED );
if( ret ) [[likely]]
return true;
// failed: decompose the observed value back into expected
expected.*FIRST = (size_t)dwExpected;
expected.*SECOND = (size_t)(dwExpected >> SZT_BITS);
return false;
#else
#error unspupported platform
#endif
}
else
// std::atomic<pair_t> is directly usable
if constexpr( Type == DCAS_SEQ )
return u.m_subsitute.compare_exchange_weak( expected, desired,
memory_order_seq_cst, memory_order_relaxed );
else if constexpr( Type == DCAS_ACQUIRE )
return u.m_subsitute.compare_exchange_weak( expected, desired,
memory_order_acquire, memory_order_relaxed );
else if constexpr( Type == DCAS_RELAXED )
return u.m_subsitute.compare_exchange_weak( expected, desired,
memory_order_relaxed, memory_order_relaxed );
else
return u.m_subsitute.compare_exchange_weak( expected, desired,
memory_order_release, memory_order_relaxed );
}
// Atomic load of the whole pair with the requested order.
// Fixes vs. original:
//  - the DCAS fallback passed std::bool_constant<Type>() to
//    compare_exchange; Type is an int and deduction against dcas_type<Type>
//    fails — it must be dcas_type<Type>().
//  - memory_order_release is not a valid order for an atomic load (UB per
//    [atomics.order]); DCAS_RELEASE is now rejected at compile time.
template<int Type>
inline typename dcas_atomic::pair_t dcas_atomic::load( dcas_type<Type> )
const noexcept
{
    using namespace std;
    static_assert(Type == DCAS_SEQ || Type == DCAS_ACQUIRE ||
        Type == DCAS_RELAXED, "release is not a valid load order");
    if constexpr( !SUBSTITITABLE )
    {
        // a CAS with identical expected/desired values acts as a
        // double-width atomic read
        pair_t cmp( *this );
        while( !this->compare_exchange( cmp, cmp, dcas_type<Type>() ) );
        return cmp;
    }
    else
        if constexpr( Type == DCAS_SEQ )
            return u.m_subsitute.load( memory_order_seq_cst );
        else if constexpr( Type == DCAS_ACQUIRE )
            return u.m_subsitute.load( memory_order_acquire );
        else
            return u.m_subsitute.load( memory_order_relaxed );
}
// Atomic store of the whole pair with the requested order, implemented as
// a CAS loop when no native double-width store exists.
// Fix: memory_order_acquire is not a valid order for an atomic store (UB
// per [atomics.order]); DCAS_ACQUIRE is now rejected at compile time.
template<int Type>
inline void dcas_atomic::store( pair_t const &niu, dcas_type<Type> )
{
    using namespace std;
    static_assert(Type == DCAS_SEQ || Type == DCAS_RELAXED ||
        Type == DCAS_RELEASE, "acquire is not a valid store order");
    if constexpr( !SUBSTITITABLE )
    {
        pair_t cmp( *this );
        while( !this->compare_exchange( cmp, niu, dcas_type<Type>() ) );
    }
    else
        if constexpr( Type == DCAS_SEQ )
            u.m_subsitute.store( niu, memory_order_seq_cst );
        else if constexpr( Type == DCAS_RELAXED )
            u.m_subsitute.store( niu, memory_order_relaxed );
        else
            u.m_subsitute.store( niu, memory_order_release );
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#if defined(__llvm__)
#pragma clang diagnostic pop
#endif