// Alex Horn, University of Oxford
//
// The source code is structured into three main parts:
//
// 1) Data structures and algorithm of the linearizability tester,
//    including a new optional partitioning algorithm;
//
// 2) Immutable data types for sets, registers and stacks with
//    efficient equality checks;
//
// 3) Unit tests and experiments with TBB, EMBB, and etcd.

#ifndef __LINEARIZABILITY_TESTER
#define __LINEARIZABILITY_TESTER

#include <algorithm>
#include <atomic>
#include <cassert>
#include <climits>
#include <cstddef>
#include <functional>
#include <list>
#include <memory>
#include <stdexcept>
#include <thread>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

// functional testing and experiments
// (the TBB, EMBB, and etcd headers are included by the unit tests)

/// Allow users to print out counterexamples
#define _LT_DEBUG_

/// Built-in timeout feature
#define _LT_TIMEOUT_

#ifdef _LT_DEBUG_
#include <iostream>
#include <string>
#endif

#ifdef _LT_TIMEOUT_
#include <chrono>
#endif

#ifdef _DEBUG
#ifndef DBG_NEW
#define DBG_NEW new ( _NORMAL_BLOCK , __FILE__ , __LINE__ )
#define new DBG_NEW
#endif
#endif // _DEBUG

#if __cplusplus <= 201103L
// since C++14 in std, see Herb Sutter's blog
template<class T, class ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
#else
using std::make_unique;
#endif

/// Linearizability tester
namespace lt
{

/************* Core data structures and algorithms *************/

template<class S>
class Entry;

/// Doubly-linked list of log entries
///
/// S - sequential data type
template<class S>
using EntryPtr = Entry<S>*;

/// Bounded stack of call entries that have been linearized
///
/// S - sequential data type
template<class S>
class Stack
{
private:
  typedef std::tuple<EntryPtr<S>, S> Pair;
  typedef std::vector<Pair> Pairs;
  typedef typename Pairs::size_type SizeType;

  // A constant-size vector
  Pairs m_vector;
  SizeType m_top;

public:
  /// Create a new stack of bounded height
  ///
  /// \post: if capacity is positive, then !is_full()
  Stack(SizeType capacity)
  : m_vector(capacity), m_top{0U}
  {
    assert(capacity == 0U || !is_full());
  }

  /// History length in the stack
  SizeType size() const noexcept
  {
    return m_top;
  }

  /// Is size() zero?
  bool is_empty() const noexcept
  {
    return 0U == size();
  }

  /// Is size() equal to the stack's capacity?
  bool is_full() const noexcept
  {
    return m_top == m_vector.size();
  }

  /// \pre: !is_empty()
  const Pair& top() const noexcept
  {
    assert(!is_empty());
    return m_vector[m_top - 1U];
  }

  /// Add an entry to the top() of the stack
  ///
  /// \pre: !is_full()
  /// \pre: ptr->is_call()
  void push(EntryPtr<S>, S&&);

  /// Remove count entries from the stack
  ///
  /// \pre: 0 < count <= size()
  void pop(unsigned count = 1U)
  {
    assert(0U < count);
    assert(count <= size());

    m_top -= count;
  }

  /// \internal
  EntryPtr<S> entry_ptr(std::size_t pos)
  {
    assert(pos < m_top);
    return std::get<0>(m_vector[pos]);
  }
};

enum class Option : unsigned char
{
  NEVER_CACHE,
  LRU_CACHE,
  ALWAYS_CACHE,
};

template<class S> class Entry;
template<class S> class Log;
template<class S> class ConcurrentLog;
template<class S> class Slicer;
template<class S, Option use_cache = Option::ALWAYS_CACHE>
class LinearizabilityTester;
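// The Option value is passed as a template argument to the
// LinearizabilityTester declared above: it selects, at compile time, how
// the tester caches already-explored states. NEVER_CACHE disables caching,
// LRU_CACHE bounds the memory spent on the cache, and ALWAYS_CACHE
// remembers every explored state.
//
// A minimal sketch of selecting a strategy, given a filled log
// (state::Atomic is the sequential data type defined at the end of this
// header):
//
//   lt::LinearizabilityTester<lt::state::Atomic, lt::Option::LRU_CACHE>
//     tester{log.info()};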
/// A kind of "functor" in C++ terminology
///
/// S - sequential data type
template<class S>
class Op
{
private:
  friend class Entry<S>;

  // Is m_partition defined?
  bool m_is_partitionable;
  unsigned m_partition;

  // modified by Entry
  unsigned ref_counter;

#ifdef _LT_DEBUG_
  virtual std::ostream& print(std::ostream&) const = 0;
#endif

  virtual std::pair<bool, S> internal_apply(const S&, const Op<S>&)
  {
    return {};
  }

public:
  Op()
  : m_is_partitionable{false},
    m_partition{0U},
    ref_counter{0U} {}

  Op(unsigned partition)
  : m_is_partitionable{true},
    m_partition{partition},
    ref_counter{0U} {}

  Op(bool is_partitionable, unsigned partition)
  : m_is_partitionable{is_partitionable},
    m_partition{partition},
    ref_counter{0U} {}

  virtual ~Op()
  {
    assert(ref_counter == 0);
  }

  /// Is partition() defined?
  bool is_partitionable() const noexcept
  {
    return m_is_partitionable;
  }

  /// \pre: is_partitionable()
  unsigned partition() const
  {
    assert(m_is_partitionable);
    return m_partition;
  }

  /// Returns true exactly if the operation could be applied
  std::pair<bool, S> apply(const S& s, const Op<S>& op)
  {
    return internal_apply(s, op);
  }

#ifdef _LT_DEBUG_
  friend std::ostream& operator<<(std::ostream& os, const Op& op)
  {
    return op.print(os);
  }
#endif
};
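// Operations of a sequential data type are modelled by subclassing Op<S>
// and overriding internal_apply(): given the current state s and the
// matching return operation, it yields whether the call can be linearized
// at this point and, if so, the successor state. Under _LT_DEBUG_, the
// pure virtual print() member must be overridden as well.
//
// A minimal sketch, assuming a hypothetical sequential type Counter whose
// increment() returns the successor state:
//
//   struct IncrementOp : public lt::Op<Counter>
//   {
//     std::pair<bool, Counter>
//     internal_apply(const Counter& c, const lt::Op<Counter>& ret) override
//     {
//       // an unconditional increment can always be linearized here
//       return {true, c.increment()};
//     }
//   };
//
// The ReadCallOp, WriteCallOp and CASCallOp structs inside state::Atomic
// at the end of this header are complete examples of this pattern.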
/// Fixed-size set of bits with persistence features
class Bitset
{
public:
  typedef std::size_t Pos;

private:
  friend struct BitsetHash;
  friend class FlexibleBitset;

  typedef unsigned long Block;
  typedef std::vector<Block> Blocks;
  typedef Blocks::size_type BlockIndex;

  /// Accessible bits in a Block
  typedef unsigned BlockWidth;

  static constexpr BlockWidth s_bits_per_block =
    static_cast<BlockWidth>(sizeof(Block) * CHAR_BIT);

  static BlockIndex block_index(Pos pos) noexcept
  {
    return pos / s_bits_per_block;
  }

  static BlockIndex blocks_size(Pos max_pos) noexcept
  {
    return block_index(max_pos) + 1U;
  }

  static BlockWidth bit_index(Pos pos) noexcept
  {
    return static_cast<BlockWidth>(pos % s_bits_per_block);
  }

  static Block bit_mask(Pos pos) noexcept
  {
    return Block(1U) << bit_index(pos);
  }

  /// only resized by FlexibleBitset
  Blocks m_blocks;

  std::size_t m_hash;
  unsigned m_number_of_set_bits;

  Block& find_block(Pos pos)
  {
    BlockIndex i{block_index(pos)};
    assert(i < m_blocks.size());
    return m_blocks[i];
  }

  // We exploit the fact that XOR forms an abelian group:
  // first, clear hash of old block; then, hash new block.
  void update_hash(Block old_block, Block new_block)
  {
    m_hash ^= old_block;
    m_hash ^= new_block;
  }

public:
  Bitset(Pos max_pos)
  : m_blocks(blocks_size(max_pos)),
    m_hash{0U},
    m_number_of_set_bits{0U} {}

  bool is_empty() const noexcept
  {
    return m_number_of_set_bits == 0U;
  }

  bool set(Pos pos)
  {
    Block& block = find_block(pos);
    const Block copy_block{block};
    block |= bit_mask(pos);

    update_hash(copy_block, block);

    bool ok{block != copy_block};
    m_number_of_set_bits += ok;
    return ok;
  }

  Bitset immutable_set(Pos pos) const
  {
    Bitset copy{*this};
    copy.set(pos);
    return copy;
  }

  bool is_set(Pos pos) const
  {
    BlockIndex i{block_index(pos)};
    if (i < m_blocks.size())
      return (m_blocks[i] & bit_mask(pos)) != 0U;

    return false;
  }

  bool reset(Pos pos)
  {
    Block& block = find_block(pos);
    const Block copy_block{block};
    block &= ~bit_mask(pos);

    update_hash(copy_block, block);

    bool ok{block != copy_block};
    m_number_of_set_bits -= ok;
    return ok;
  }

  Bitset immutable_reset(Pos pos) const
  {
    Bitset copy{*this};
    copy.reset(pos);
    return copy;
  }

  // Same number of blocks and identical bits in all those blocks?
  bool operator==(const Bitset& other) const noexcept
  {
    return m_number_of_set_bits == other.m_number_of_set_bits &&
      m_blocks == other.m_blocks;
  }

  bool operator!=(const Bitset& other) const noexcept
  {
    return m_number_of_set_bits != other.m_number_of_set_bits ||
      m_blocks != other.m_blocks;
  }
};

/// Constant-time, O(1), hash function
struct BitsetHash
{
  std::size_t operator()(const Bitset& bitset) const noexcept
  {
    return bitset.m_hash;
  }
};
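// Bitset is persistent in the functional-programming sense: immutable_set()
// and immutable_reset() leave the receiver untouched and return a modified
// copy, which lets the backtracking search fork a state cheaply. Since
// update_hash() XORs the old block out and the new block in, m_hash is
// maintained incrementally, making BitsetHash a genuine O(1) hash function.
//
// A minimal sketch:
//
//   lt::Bitset bitset{7U};                      // positions 0..7 usable
//   lt::Bitset copy{bitset.immutable_set(3U)};  // copy has bit 3 set
//   assert(!bitset.is_set(3U));                 // original is unchanged
//   assert(bitset != copy);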
/// States of abstract data types
namespace state
{
  template<class T>
  struct Hash
  {
    std::size_t operator()(const T&) const noexcept;
  };
}

template<class S>
using OpPtr = std::unique_ptr<Op<S>>;

/// Call/ret log entry
///
/// S - sequential data type
template<class S>
class Entry
{
private:
  friend class Log<S>;
  friend class Slicer<S>;
  friend class LinearizabilityTester<S, Option::NEVER_CACHE>;
  friend class LinearizabilityTester<S, Option::LRU_CACHE>;
  friend class LinearizabilityTester<S, Option::ALWAYS_CACHE>;

  // Ref counted pointer because we need to copy logs so that we
  // can experimentally compare different linearizability testers
  //
  // However, this is an implementation detail and the strict type
  // of OpPtr<S> enforces at compile-time that we manage the
  // ownership of these kind of pointers on the user's behalf.
  Op<S>* m_op_ptr;
  unsigned m_entry_id;
  std::thread::id m_thread_id;
  EntryPtr<S> m_match;
  bool m_is_call;

  void inc_ref_counter() const noexcept
  {
    if (m_op_ptr != nullptr)
      ++m_op_ptr->ref_counter;
  }

  void dec_ref_counter() const
  {
    assert(m_op_ptr == nullptr || 0 < m_op_ptr->ref_counter);

    if (m_op_ptr != nullptr && --m_op_ptr->ref_counter == 0)
      delete m_op_ptr;
  }

  /// Log head
  ///
  /// \post: if _next is not nullptr, then _next->prev == this
  Entry(EntryPtr<S> _next)
  : m_op_ptr{nullptr},
    m_entry_id{},
    m_thread_id{},
    m_match{nullptr},
    m_is_call{false},
    prev{nullptr},
    next{_next}
  {
    if (_next != nullptr)
      _next->prev = this;
  }

public:
  ~Entry()
  {
    dec_ref_counter();
  }

  EntryPtr<S> prev;
  EntryPtr<S> next;

  Entry()
  : m_op_ptr{nullptr},
    m_entry_id{},
    m_thread_id{},
    m_match{nullptr},
    m_is_call{false},
    prev{nullptr},
    next{nullptr} {}

  Entry(const Entry& entry)
  : m_op_ptr{entry.m_op_ptr},
    m_entry_id{entry.m_entry_id},
    m_thread_id{entry.m_thread_id},
    m_match{entry.m_match},
    m_is_call{entry.m_is_call},
    prev{entry.prev},
    next{entry.next}
  {
    inc_ref_counter();
  }

  Entry& operator=(const Entry& entry)
  {
    entry.inc_ref_counter();
    dec_ref_counter();

    m_op_ptr = entry.m_op_ptr;
    m_entry_id = entry.m_entry_id;
    m_thread_id = entry.m_thread_id;
    m_match = entry.m_match;
    m_is_call = entry.m_is_call;
    prev = entry.prev;
    next = entry.next;

    return *this;
  }

  Entry& operator=(Entry&& entry)
  {
    // only decrement required (due to move semantics)
    dec_ref_counter();

    m_op_ptr = entry.m_op_ptr;
    m_entry_id = entry.m_entry_id;
    m_thread_id = entry.m_thread_id;
    m_match = entry.m_match;
    m_is_call = entry.m_is_call;
    prev = entry.prev;
    next = entry.next;

    entry.m_op_ptr = nullptr;
    entry.m_entry_id = 0;
    entry.m_thread_id = std::thread::id();
    entry.m_match = nullptr;
    entry.m_is_call = false;
    entry.prev = nullptr;
    entry.next = nullptr;

    return *this;
  }

  /// \pre: set_match and set_op have been called with non-null arguments
  bool is_partitionable() const
  {
    assert(m_match != nullptr);
    assert(m_match->m_op_ptr != nullptr);
    assert(m_op_ptr->m_is_partitionable == m_match->m_op_ptr->m_is_partitionable);
    assert(m_op_ptr->m_partition == m_match->m_op_ptr->m_partition);

    return m_op_ptr->m_is_partitionable;
  }

  void set_op(OpPtr<S>&& op_ptr) noexcept
  {
    m_op_ptr = op_ptr.release();
    inc_ref_counter();
  }

  Op<S>& op() const
  {
    assert(m_op_ptr != nullptr);
    return *m_op_ptr;
  }

  const Op<S>* op_ptr() const noexcept
  {
    return m_op_ptr;
  }

  void set_entry_id(unsigned entry_id) noexcept
  {
    m_entry_id = entry_id;
  }

  unsigned entry_id() const noexcept
  {
    return m_entry_id;
  }

  void set_thread_id(std::thread::id thread_id) noexcept
  {
    m_thread_id = thread_id;
  }

  std::thread::id thread_id() const noexcept
  {
    return m_thread_id;
  }

  /// \pre: ret_entry_ptr->match() == nullptr
  /// \pre: !ret_entry_ptr->is_call()
  ///
  /// \post: this->is_call()
  /// \post: this == ret_entry_ptr->match()
  /// \post: this->match() == ret_entry_ptr
  /// \post: this->entry_id() == ret_entry_ptr->entry_id()
  /// \post: if this->is_partitionable() || ret_entry_ptr->is_partitionable(),
  ///        then this->op().partition() == ret_entry_ptr->op().partition()
  void set_match(EntryPtr<S> ret_entry_ptr) noexcept
  {
    assert(ret_entry_ptr->m_match == nullptr);
    assert(!ret_entry_ptr->is_call());

    ret_entry_ptr->m_match = this;
    ret_entry_ptr->set_entry_id(m_entry_id);

    if (ret_entry_ptr->op().m_is_partitionable)
    {
      op().m_is_partitionable = ret_entry_ptr->op().m_is_partitionable;
      op().m_partition = ret_entry_ptr->op().m_partition;
    }
    else
    {
      ret_entry_ptr->op().m_is_partitionable = op().m_is_partitionable;
      ret_entry_ptr->op().m_partition = op().m_partition;
    }

    m_match = ret_entry_ptr;
    m_is_call = true;

    assert(is_call());
    assert(this == ret_entry_ptr->match());
    assert(match() == ret_entry_ptr);
    assert(entry_id() == ret_entry_ptr->entry_id());
    assert(op().m_is_partitionable == ret_entry_ptr->op().m_is_partitionable);
    assert(op().m_partition == ret_entry_ptr->op().m_partition);
  }

  EntryPtr<S> match() const noexcept
  {
    return m_match;
  }

  bool is_call() const noexcept
  {
    return m_is_call;
  }
};

#ifdef _LT_DEBUG_
/// S - sequential data type
template<class S>
std::ostream& operator<<(std::ostream& os, EntryPtr<S> entry_ptr)
{
  if (entry_ptr == nullptr)
    return os << "entry id: none, thread id: none [nullptr]";

  const Entry<S>& entry = *entry_ptr;
  return os
    << "entry id: " << entry.entry_id()
    << ", thread id: " << entry.thread_id()
    << ", " << (entry.is_call() ? "call: " : "return: ")
    << entry.op();
}
#endif

template<class S>
void Stack<S>::push(EntryPtr<S> ptr, S&& s)
{
  assert(!is_full());
  assert(ptr != nullptr);
  assert(ptr->is_call());

  // no overflow
  m_vector[m_top++] = std::make_pair(ptr, std::move(s));
  assert(0U != m_top);
}

/// Input to linearizability testers
///
/// S - sequential data type
template<class S>
class LogInfo
{
private:
  friend class Slicer<S>;

  EntryPtr<S> m_log_head_ptr;
  std::size_t m_number_of_entries;

public:
  /// \post: is_empty()
  LogInfo()
  : m_log_head_ptr{nullptr}, m_number_of_entries{0U} {}

  /// \pre: number_of_entries is positive and even
  /// \pre: log_head_ptr is not nullptr
  /// \post: !is_empty()
  LogInfo(EntryPtr<S> log_head_ptr, std::size_t number_of_entries)
  : m_log_head_ptr{log_head_ptr}, m_number_of_entries{number_of_entries}
  {
    assert(log_head_ptr != nullptr);
    assert(0U < m_number_of_entries);
    assert((m_number_of_entries & 1U) == 0U);
  }

  /// Ptr to the first entry in the log
  EntryPtr<S> log_head_ptr() const noexcept
  {
    return m_log_head_ptr;
  }

  /// Total number of call entries plus return entries.
  ///
  /// Returns an even number since every call is paired with a return
  std::size_t number_of_entries() const noexcept
  {
    return m_number_of_entries;
  }

  bool is_empty() const noexcept
  {
    return m_log_head_ptr == nullptr && m_number_of_entries == 0U;
  }
};
#ifdef _LT_DEBUG_
/// S - sequential data type
template<class S>
std::ostream& operator<<(std::ostream& os, const LogInfo<S>& log_info)
{
  EntryPtr<S> entry_ptr{log_info.log_head_ptr()};

  os << "log info, number of entries: " << log_info.number_of_entries() << std::endl;
  for (; entry_ptr != nullptr; entry_ptr = entry_ptr->next)
    os << entry_ptr << std::endl;

  return os;
}
#endif

/// Bounded history log
///
/// If you need thread-safety, use ConcurrentLog<S> instead.
///
/// S - sequential data type
template<class S>
class Log
{
private:
  // fixed-size vector
  typedef std::vector<Entry<S>> Entries;

public:
  typedef typename Entries::size_type Size;

private:
  // we never resize the vector and so pointers into it are stable
  Size m_entry_id, m_index;
  Entries m_entries;
  EntryPtr<S> m_last_entry_ptr;

  void link(Entry<S>& entry) noexcept
  {
    if (m_last_entry_ptr != nullptr)
      m_last_entry_ptr->next = &entry;

    entry.prev = m_last_entry_ptr;
  }

public:
  Log(const Log&) = delete;

  /// A history with at most capacity entries
  Log(Size capacity)
  : m_entry_id{0U},
    m_index{0U},
    m_entries(capacity),
    m_last_entry_ptr{nullptr} {}

  /// Copy entries
  Log(LogInfo<S> log_info)
  : m_entry_id{0U},
    m_index{0U},
    m_entries(log_info.number_of_entries()),
    m_last_entry_ptr{nullptr}
  {
    EntryPtr<S> entry_ptr{log_info.log_head_ptr()};
    std::vector<Size> matches(log_info.number_of_entries() >> 1);

    while (entry_ptr != nullptr)
    {
      assert(m_index < m_entries.size());

      Entry<S>& new_entry = m_entries[m_index];
      new_entry = *entry_ptr;
      new_entry.m_match = nullptr;
      link(new_entry);

      if (new_entry.is_call())
      {
        matches[new_entry.entry_id()] = m_index;
      }
      else
      {
        Entry<S>& call_entry = m_entries[matches[new_entry.entry_id()]];
        call_entry.set_match(&new_entry);
      }

      m_last_entry_ptr = &new_entry;
      entry_ptr = entry_ptr->next;
      ++m_index;
    }

    assert(m_index == m_entries.size());
    assert(entry_ptr == nullptr);
  }

  EntryPtr<S> add_call(OpPtr<S>&& op_ptr)
  {
    assert(m_index < m_entries.size());

    Entry<S>& entry = m_entries[m_index++];
    entry.set_op(std::move(op_ptr));
    entry.set_entry_id(m_entry_id++);

    link(entry);
    m_last_entry_ptr = &entry;
    return m_last_entry_ptr;
  }

  /// \post: call_entry_ptr->is_call()
  EntryPtr<S> add_ret(EntryPtr<S> call_entry_ptr, OpPtr<S>&& op_ptr)
  {
    assert(m_index < m_entries.size());

    Entry<S>& entry = m_entries[m_index++];
    entry.set_op(std::move(op_ptr));
    link(entry);

    m_last_entry_ptr = &entry;
    call_entry_ptr->set_match(m_last_entry_ptr);

    assert(call_entry_ptr->is_call());
    assert(m_entry_id <= m_index);
    return m_last_entry_ptr;
  }

  EntryPtr<S> log_head_ptr()
  {
    return &m_entries.front();
  }

  /// Total number of call entries plus return entries.
  ///
  /// Returns an even number since every call is paired with a return
  std::size_t number_of_entries() const noexcept
  {
    return m_index;
  }

  LogInfo<S> info()
  {
    return {log_head_ptr(), number_of_entries()};
  }
};
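// A minimal sketch of recording a single-threaded history with Log, using
// the read/write operations of state::Atomic (defined at the end of this
// header). Each add_call() returns the call entry that must later be passed
// to add_ret() so that the two entries can be matched up:
//
//   lt::Log<lt::state::Atomic> log{4U};  // capacity: 4 entries, i.e. 2 ops
//
//   lt::EntryPtr<lt::state::Atomic> call_ptr =
//     log.add_call(lt::state::Atomic::make_write_call(1));
//   log.add_ret(call_ptr, lt::state::Atomic::make_write_ret());
//
//   call_ptr = log.add_call(lt::state::Atomic::make_read_call());
//   log.add_ret(call_ptr, lt::state::Atomic::make_read_ret(1));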
/// Output of linearizability tester
///
/// S - sequential data type
template<class S>
class Result
{
private:
  friend class LinearizabilityTester<S, Option::NEVER_CACHE>;
  friend class LinearizabilityTester<S, Option::LRU_CACHE>;
  friend class LinearizabilityTester<S, Option::ALWAYS_CACHE>;

  typedef std::vector<EntryPtr<S>> EntryPtrs;

  bool m_is_linearizable;
  EntryPtrs m_entry_ptrs;

#ifdef _LT_DEBUG_
  unsigned m_cutoff_entry_id;
  EntryPtr<S> m_log_head_ptr;
#endif

  bool m_is_timeout;

  double m_virtual_memory_usage;
  double m_resident_set_size;

  void reset()
  {
    m_is_linearizable = true;
    m_entry_ptrs.clear();
#ifdef _LT_DEBUG_
    m_cutoff_entry_id = 0U;
    m_log_head_ptr = nullptr;
#endif
    m_is_timeout = false;
    m_virtual_memory_usage = 0.0;
    m_resident_set_size = 0.0;
  }

public:
  /// Initially linearizable
  Result()
  : m_is_linearizable{true},
    m_entry_ptrs{},
#ifdef _LT_DEBUG_
    m_cutoff_entry_id{0U},
    m_log_head_ptr{nullptr},
#endif
    m_is_timeout{false},
    m_virtual_memory_usage{0.0},
    m_resident_set_size{0.0} {}

  /// \pre: !is_timeout()
  bool is_linearizable() const noexcept
  {
    assert(!is_timeout());
    return m_is_linearizable;
  }

  bool is_timeout() const noexcept
  {
    return m_is_timeout;
  }

  /// Zero if unknown, unit: MiB
  double virtual_memory_usage() const noexcept
  {
    return m_virtual_memory_usage;
  }

  /// Zero if unknown, unit: MiB
  double resident_set_size() const noexcept
  {
    return m_resident_set_size;
  }

#ifdef _LT_DEBUG_
  void debug(std::ostream& os, bool verbose = false)
  {
    os << "Linearizable: ";
    if (m_is_linearizable)
    {
      os << "Yes" << std::endl;
      for (EntryPtr<S> entry_ptr : m_entry_ptrs)
        os << entry_ptr << " : " << entry_ptr->match() << std::endl;

      return;
    }

    os << "No" << std::endl;
    EntryPtr<S> entry_ptr{m_log_head_ptr};
    for (; entry_ptr != nullptr; entry_ptr = entry_ptr->next)
    {
      os << entry_ptr << std::endl;
      if (entry_ptr->entry_id() == m_cutoff_entry_id)
      {
        os << "^ previous entries cannot be linearized" << std::endl;

        if (!(verbose || entry_ptr->is_call()))
          return;
      }
    }
  }
#endif
};

#ifdef _LT_TIMEOUT_
template<class Clock = std::chrono::steady_clock>
struct Timeout
{
  const typename Clock::time_point start_time;
  const typename Clock::duration max_duration;

  Timeout()
  : start_time{Clock::now()},
    max_duration{Clock::duration::max()} {}

  Timeout(typename Clock::duration duration)
  : start_time{Clock::now()},
    max_duration{duration} {}

  bool is_expired() const
  {
    return max_duration < (Clock::now() - start_time);
  }
};
#endif

/// Least-recently used cache eviction
template<class Key, class Hash = std::hash<Key>>
class LruCache
{
private:
  typedef std::list<Key> List;
  typedef typename List::iterator ListIterator;
  typedef std::unordered_map<Key, ListIterator, Hash> UnorderedMap;
  typedef typename UnorderedMap::size_type Capacity;

  const Capacity m_capacity;

  UnorderedMap m_unordered_map;
  List m_list;

public:
  typedef typename UnorderedMap::iterator Iterator;

  LruCache()
  : m_capacity{4096},
    m_unordered_map{m_capacity} {}

  LruCache(Capacity capacity)
  : m_capacity{capacity},
    m_unordered_map{m_capacity} {}

  bool insert(Key&& key)
  {
    std::pair<Iterator, bool> pair{m_unordered_map.insert(
      std::make_pair(std::move(key), m_list.end()))};

    if (pair.second)
      pair.first->second = m_list.insert(m_list.end(), pair.first->first);
    else
      m_list.splice(m_list.end(), m_list, pair.first->second);

    if (m_unordered_map.size() == m_capacity)
    {
      auto iter = m_unordered_map.find(m_list.front());
      assert(iter != m_unordered_map.cend());
      m_unordered_map.erase(iter);
      m_list.pop_front();
    }

    return pair.second;
  }
};
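// A minimal sketch of the eviction policy: insert() returns true exactly if
// the key was not yet cached, and once the map reaches its capacity the
// least recently used key is evicted:
//
//   lt::LruCache<std::string> cache{2U};
//   assert(cache.insert("a"));
//   assert(cache.insert("b"));  // reaches capacity, evicts LRU key "a"
//   assert(cache.insert("a"));  // so "a" is inserted afresh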
namespace cache
{
  // regardless of caching, we need to keep track of the current state of type S
  template<class S>
  using State = std::pair<Bitset, S>;

  // if caching is enabled, we use these hash functions
  template<class S>
  class StateHash
  {
  private:
    // courtesy of Daniel Kroening, see CPROVER source file util/irep.cpp
    static inline size_t hash_rotl(std::size_t value, unsigned shift)
    {
      return (value << shift) | (value >> ((sizeof(value) * 8U) - shift));
    }

    // courtesy of Daniel Kroening, see CPROVER source file util/irep.cpp
    static inline size_t hash_combine(std::size_t h1, std::size_t h2)
    {
      return hash_rotl(h1, 7U) ^ h2;
    }

    const BitsetHash m_bitset_hash;
    const state::Hash<S> m_s_hash;

  public:
    StateHash() : m_bitset_hash{}, m_s_hash{} {}

    std::size_t operator()(const State<S>& state) const noexcept
    {
      return hash_combine(m_bitset_hash(state.first), m_s_hash(state.second));
    }
  };

  template<class S, Option option>
  struct Switch
  {
    typedef std::nullptr_t Type;

    static bool try_insert(const S& s, const EntryPtr<S> entry_ptr,
      Type& cache, Bitset& bitset)
    {
      return true;
    }
  };

  template<class S>
  struct Switch<S, Option::LRU_CACHE>
  {
    typedef LruCache<State<S>, StateHash<S>> Type;

    static bool try_insert(const S& s, const EntryPtr<S> entry_ptr,
      Type& cache, Bitset& bitset)
    {
      return cache.insert(std::make_pair(bitset.immutable_set(entry_ptr->entry_id()), s));
    }
  };

  template<class S>
  struct Switch<S, Option::ALWAYS_CACHE>
  {
    typedef std::unordered_set<State<S>, StateHash<S>> Type;

    static bool try_insert(const S& s, const EntryPtr<S> entry_ptr,
      Type& cache, Bitset& bitset)
    {
      unsigned pos = entry_ptr->entry_id();
      Bitset bs = bitset.immutable_set(pos);
      return std::get<1>(cache.emplace(bs, s));
    }
  };
}
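// The cache key is the pair State<S> = (bitset, state): two points of the
// search are interchangeable exactly if the same set of call entries has
// been linearized (the bitset) and the resulting states of type S are
// equal, so revisiting such a point can be pruned. Switch<S, option>
// selects the cache implementation at compile time; its primary template
// (used for NEVER_CACHE) turns try_insert() into a no-op that always
// succeeds.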
/// S - sequential data type
template<class S, Option use_cache>
class LinearizabilityTester
{
private:
  typedef cache::Switch<S, use_cache> Cache;

  // Maximum number of call/ret entries, i.e. half of the
  // total number of entry pointers reachable in m_log_head
  const std::size_t m_log_size;

  // History to linearize, every call is matched by a return
  const Entry<S> m_log_head;

  // Invariants:
  //
  // * for every EntryPtr `e` in `m_calls`, `e->is_call()` holds
  // * for every EntryPtr `e`, if `e` in `m_calls`, then `e` is not reachable
  //   from `m_log_head` by following the next pointers.
  Stack<S> m_calls;

#ifdef _LT_TIMEOUT_
  Timeout<std::chrono::steady_clock> m_timeout;
#endif

  // An approximation of the workload
  unsigned long m_number_of_iterations;

  // see http://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-run-time-in-c
  //
  // process_mem_usage(double&, double&) - takes two doubles by reference,
  // attempts to read the system-dependent data for a process' virtual memory
  // size and resident set size, and returns the results in MiB.
  //
  // On failure, returns 0.0, 0.0
  static void process_mem_usage(double& vm_usage, double& resident_set)
  {
    vm_usage = resident_set = 0.0;
  }

  // Temporarily remove call_entry_ptr and call_entry_ptr->match() from the log
  //
  // \pre: call_entry_ptr->is_call()
  static void lift(const EntryPtr<S> call_entry_ptr)
  {
    const Entry<S>& call = *call_entry_ptr;
    assert(call.is_call());

    Entry<S>& match = *call.match();
    call.prev->next = call.next;
    call.next->prev = call.prev;
    match.prev->next = match.next;

    if (match.next != nullptr)
      match.next->prev = match.prev;
  }

  // Reinsert call_entry_ptr and call_entry_ptr->match() into the log
  //
  // \pre: call_entry_ptr->is_call()
  static void unlift(const EntryPtr<S> call_entry_ptr)
  {
    const Entry<S>& call = *call_entry_ptr;
    assert(call.is_call());

    Entry<S>& match = *call.match();
    assert(match.prev->next == match.next);
    match.prev->next = &match;

    if (match.next != nullptr)
      match.next->prev = &match;

    assert(call.prev->next == call.next);
    call.prev->next = call_entry_ptr;
    call.next->prev = call_entry_ptr;
  }

  void internal_check(Result<S>& result, unsigned& global_linearized_entry_id)
  {
    S s, new_s;
    bool is_entry_linearizable;
    typename Cache::Type cache;
    EntryPtr<S> pop_entry_ptr, entry_ptr{m_log_head.next};

    double virtual_memory_usage;
    double resident_set_size;

    // fixing the size is not merely an optimization but
    // necessary for checking the equality of bitsets
    Bitset linearized_entries(m_log_size);

    while (m_log_head.next != nullptr)
    {
      process_mem_usage(virtual_memory_usage, resident_set_size);
      result.m_virtual_memory_usage = std::max(result.m_virtual_memory_usage, virtual_memory_usage);
      result.m_resident_set_size = std::max(result.m_resident_set_size, resident_set_size);

#ifdef _LT_TIMEOUT_
      if (m_timeout.is_expired())
      {
        result.m_is_timeout = true;
        break;
      }
#endif

      ++m_number_of_iterations;

      assert(entry_ptr != nullptr);
      if (entry_ptr->is_call())
      {
        assert(!m_calls.is_full());
        assert(entry_ptr->match() != nullptr);
        assert(!linearized_entries.is_set(entry_ptr->entry_id()));

        std::tie(is_entry_linearizable, new_s) =
          entry_ptr->op().apply(s, entry_ptr->match()->op());

        if (is_entry_linearizable && Cache::try_insert(new_s, entry_ptr, cache, linearized_entries))
        {
          // call entry is always matched up with a return entry
          assert(entry_ptr->next != nullptr);

          // provisionally linearize the call entry together with
          // the associated state produced by the new linearization
          m_calls.push(entry_ptr, std::move(s));
          s = std::move(new_s);
          linearized_entries.set(entry_ptr->entry_id());

          // provisionally remove the call and return entry from the history
          lift(entry_ptr);

          // restart from the beginning of the shortened history
          entry_ptr = m_log_head.next;
        }
        else // cannot linearize call entry
        {
          // get the next entry in the unmodified history
          entry_ptr = entry_ptr->next;

#ifdef _LT_DEBUG_
          global_linearized_entry_id = std::max(global_linearized_entry_id, entry_ptr->entry_id());
#endif
        }
      }
      else // handle "return" entry
      {
        if (m_calls.is_empty())
          break;

        assert(!m_calls.is_empty());

        // revert state change
        std::tie(pop_entry_ptr, s) = m_calls.top();
        assert(pop_entry_ptr != nullptr);
        linearized_entries.reset(pop_entry_ptr->entry_id());

        m_calls.pop();

        // undo the provisional linearization
        unlift(pop_entry_ptr);

        // continue after the entry to which we have just backtracked
        entry_ptr = pop_entry_ptr->next;
      }
    }

    // all call entries linearized?
    result.m_is_linearizable = m_calls.is_full();
    assert(result.m_is_linearizable == (m_log_head.next == nullptr));

    // witness linearization
    std::size_t pos{0};
    result.m_entry_ptrs.resize(m_calls.size());
    for (EntryPtr<S>& entry_ptr : result.m_entry_ptrs)
      entry_ptr = m_calls.entry_ptr(pos++);
  }
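  // internal_check() is a backtracking search in the style of Wing and
  // Gong: try to linearize some pending call entry, provisionally apply its
  // state change, lift() the call/return pair out of the history, and
  // restart on the shortened history; when a return entry is reached
  // without progress, pop m_calls, restore the previous state, and
  // unlift() the entry again.
  //
  // For example, with state::Atomic (initially -1) and the history
  //
  //   call write(1); call read(); ret read() : -1; ret write()
  //
  // the search first tries "write(1) before read()", which fails because
  // the read returned -1, backtracks, and then succeeds with the
  // linearization "read() : -1 before write(1)".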
public:
  LinearizabilityTester(LogInfo<S> log_info)
  : m_log_size{log_info.number_of_entries() >> 1},
    m_log_head{log_info.log_head_ptr()},
    m_calls{m_log_size},
#ifdef _LT_TIMEOUT_
    m_timeout{},
#endif
    m_number_of_iterations{} {}

#ifdef _LT_TIMEOUT_
  LinearizabilityTester(LogInfo<S> log_info,
    std::chrono::steady_clock::duration max_duration)
  : m_log_size{log_info.number_of_entries() >> 1},
    m_log_head{log_info.log_head_ptr()},
    m_calls{m_log_size},
    m_timeout{max_duration},
    m_number_of_iterations{} {}
#endif

  /// A rough approximation of the workload
  unsigned long number_of_iterations() const noexcept
  {
    return m_number_of_iterations;
  }

  /// Is history linearizable?
  ///
  /// Throws an exception on timeout
  bool check()
  {
    Result<S> result;
    unsigned disregard_cutoff_entry_id;
    internal_check(result, disregard_cutoff_entry_id);

    if (result.is_timeout())
      throw std::runtime_error("Timeout!");

    return result.is_linearizable();
  }

  void check(Result<S>& result)
  {
    result.reset();

#ifdef _LT_DEBUG_
    internal_check(result, result.m_cutoff_entry_id);
    result.m_log_head_ptr = m_log_head.next;
#else
    unsigned disregard_cutoff_entry_id;
    internal_check(result, disregard_cutoff_entry_id);
#endif
  }
};

/// S - sequential data type
template<class S, Option use_cache = Option::ALWAYS_CACHE, class Duration>
void compositional_check(Log<S>& log, Result<S>& result,
  unsigned number_of_partitions, Duration max_duration)
{
  Slicer<S> slicer{log.info(), number_of_partitions};
  for (unsigned partition = 0; partition < slicer.number_of_partitions; ++partition)
  {
    LinearizabilityTester<S, use_cache> tester{slicer.sublog_info(partition), max_duration};
    tester.check(result);

    if (!(result.is_timeout() || result.is_linearizable()))
      break;
  }
}

/// RAII class to ensure a thread becomes unjoinable on all paths
class Thread
{
private:
  std::thread m_thread;

public:
  Thread()
  : m_thread{} {}

  Thread(std::thread&& thread)
  : m_thread(std::move(thread)) {}

  template<typename F, typename... Args>
  Thread(F&& f, Args&&... args)
  : m_thread(std::forward<F>(f), std::forward<Args>(args)...) {}

  ~Thread()
  {
    if (m_thread.joinable())
      m_thread.join();
  }

  /// \pre: joinable()
  /// \post: not joinable()
  ///
  /// Throws std::system_error if an error occurs.
  void join()
  {
    m_thread.join();
  }

  bool joinable() const noexcept
  {
    return m_thread.joinable();
  }

  Thread& operator=(Thread&& thread)
  {
    m_thread = std::move(thread.m_thread);
    return *this;
  }
};
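// A minimal end-to-end sketch, continuing the Log example above: check()
// returns whether the recorded history is linearizable, and the overload
// taking a Result also captures a witness linearization or, under
// _LT_DEBUG_, the entry at which the search got stuck:
//
//   lt::LinearizabilityTester<lt::state::Atomic> tester{log.info()};
//   lt::Result<lt::state::Atomic> result;
//   tester.check(result);
//
//   if (!result.is_timeout() && result.is_linearizable())
//     std::cout << "linearizable" << std::endl;
//
//   #ifdef _LT_DEBUG_
//   result.debug(std::cout);
//   #endif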
/// Partition history into sub-histories
///
/// A slicer partitions the history into independent sub-histories.
/// Our partitioning scheme hinges on Theorem 3.6.1 in "The Art of
/// Multiprocessor Programming" (Revised Ed.) by Herlihy and Shavit.
///
/// Typically only associative concurrent abstract data types (ADTs)
/// such as sets and hash tables are suitable for this partitioning
/// scheme. And not all operations on such ADTs are always supported.
/// For example, the partitioning scheme is incompatible with 0-arg
/// operations such as "empty?" on sets. But it is very effective if
/// we want to only check linearizability of, say, "insert", "remove"
/// and "contains".
///
/// S - sequential data type
template<class S>
class Slicer
{
private:
  typedef std::vector<LogInfo<S>> Sublogs;

  static void slice(const Entry<S>& log_head, Sublogs& sublogs)
  {
    const typename Sublogs::size_type n = sublogs.size();

    EntryPtr<S> entry_ptr{log_head.next}, next_entry_ptr;
    std::vector<EntryPtr<S>> last_entry_ptrs(sublogs.size());
    std::vector<unsigned> entry_ids(sublogs.size());
    typename Sublogs::size_type i;
    unsigned new_entry_id;

    while (entry_ptr != nullptr)
    {
      i = entry_ptr->op().partition() % n;

      LogInfo<S>& log_info = sublogs[i];
      EntryPtr<S>& last_entry_ptr = last_entry_ptrs[i];

      if (log_info.log_head_ptr() == nullptr)
      {
        // initialize sub-log
        assert(entry_ptr->is_call());
        assert(last_entry_ptr == nullptr);

        log_info.m_log_head_ptr = entry_ptr;
        log_info.m_number_of_entries = 1U;
      }
      else
      {
        // size of the sub-log increases
        ++log_info.m_number_of_entries;

        assert(last_entry_ptr != nullptr);
        last_entry_ptr->next = entry_ptr;
      }

      if (entry_ptr->is_call())
      {
        new_entry_id = entry_ids[i]++;
        entry_ptr->set_entry_id(new_entry_id);
        entry_ptr->match()->set_entry_id(new_entry_id);
      }

      next_entry_ptr = entry_ptr->next;
      entry_ptr->prev = last_entry_ptr;
      entry_ptr->next = nullptr;
      last_entry_ptr = entry_ptr;
      entry_ptr = next_entry_ptr;
    }
  }

  Sublogs m_sublogs;
  unsigned m_current_partition;

public:
  const Entry<S> log_head;
  const unsigned number_of_partitions;

  Slicer(LogInfo<S> log_info, unsigned _number_of_partitions)
  : m_sublogs(_number_of_partitions),
    m_current_partition{0U},
    log_head{log_info.log_head_ptr()},
    number_of_partitions{_number_of_partitions}
  {
    slice(log_head, m_sublogs);
  }

  const LogInfo<S>& sublog_info(unsigned partition) const
  {
    return m_sublogs[partition];
  }

  const LogInfo<S>& next_sublog_info()
  {
    static LogInfo<S> s_empty_log;

    unsigned partition = m_current_partition;
    ++m_current_partition;

    if (partition < number_of_partitions)
      return m_sublogs[partition];

    return s_empty_log;
  }
};
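// A minimal sketch of the compositional check, assuming a hypothetical
// partitionable sequential type Set whose operations were constructed with
// a partition (for example, the key they operate on), and a filled
// Log<Set> named log:
//
//   lt::Result<Set> result;
//   lt::compositional_check(log, result,
//     number_of_partitions, std::chrono::seconds{60});
//
// Per Theorem 3.6.1, the full history is linearizable exactly if every
// per-partition sub-history is, so each slice can be checked on its own.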
/// S - sequential data type
template<class S>
class ConcurrentLog
{
private:
  typedef std::vector<Entry<S>> Entries;
  typedef typename Entries::size_type Size;

  Entries m_entries;
  std::atomic<Size> m_index;

  static void link(EntryPtr<S> last_entry_ptr, Entry<S>& entry)
  {
    if (last_entry_ptr != nullptr)
      last_entry_ptr->next = &entry;

    entry.prev = last_entry_ptr;
  }

public:
  ConcurrentLog(Size capacity)
  : m_entries(capacity),
    m_index{0U} {}

  /// \remark thread-safe
  /// \pre: enough capacity
  EntryPtr<S> push_back(OpPtr<S>&& op_ptr)
  {
    // we use the relaxed memory order tag because we
    // do not need to read any other memory locations
    Size index = m_index.fetch_add(1U, std::memory_order_relaxed);
    assert(index < m_entries.size());

    // There is no data race, see [container.requirements.dataraces]
    // in Section 23.2.2, paragraph 2, p. 734 in the C++11 language
    // specification. Since the index was incremented atomically,
    // each thread accesses a different element in the vector.
    Entry<S>& entry = m_entries[index];
    entry.set_op(std::move(op_ptr));
    entry.set_thread_id(std::this_thread::get_id());

    return &entry;
  }

  /// \remark thread-safe
  /// \pre: enough capacity
  /// \post: call_entry_ptr->is_call()
  EntryPtr<S> push_back(EntryPtr<S> call_entry_ptr, OpPtr<S>&& op_ptr)
  {
    EntryPtr<S> entry_ptr = push_back(std::move(op_ptr));
    call_entry_ptr->set_match(entry_ptr);
    assert(call_entry_ptr->is_call());

    return entry_ptr;
  }

  /// \warning not thread-safe
  EntryPtr<S> log_head_ptr()
  {
    if (m_entries.front().next == nullptr)
    {
      unsigned entry_id{0U};
      Size index{0U};

      EntryPtr<S> last_entry_ptr{nullptr};
      for (Entry<S>& entry : m_entries)
      {
        if (index == m_index)
          break;

        ++index;

        if (entry.is_call())
        {
          entry.set_entry_id(entry_id);
          entry.match()->set_entry_id(entry_id);
          ++entry_id;
        }

        link(last_entry_ptr, entry);
        last_entry_ptr = &entry;
      }
    }

    return &m_entries.front();
  }

  /// Total number of call entries plus return entries.
  ///
  /// Returns an even number since every call is paired with a return
  ///
  /// \warning not thread-safe
  std::size_t number_of_entries() const noexcept
  {
    return m_index.load();
  }

  /// \warning not thread-safe
  LogInfo<S> info()
  {
    return {log_head_ptr(), number_of_entries()};
  }
};
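// A minimal sketch of concurrent recording, assuming a hypothetical
// worker() that drives the implementation under test: the push_back()
// overloads are thread-safe, and the Thread wrapper joins on scope exit,
// so the log is complete before it is inspected:
//
//   void worker(lt::ConcurrentLog<lt::state::Atomic>& log)
//   {
//     lt::EntryPtr<lt::state::Atomic> call_entry_ptr =
//       log.push_back(lt::state::Atomic::make_write_call(1));
//     // ... perform the actual operation on the system under test ...
//     log.push_back(call_entry_ptr, lt::state::Atomic::make_write_ret());
//   }
//
//   lt::ConcurrentLog<lt::state::Atomic> log{1024U};
//   {
//     lt::Thread t1{worker, std::ref(log)};
//     lt::Thread t2{worker, std::ref(log)};
//   } // both threads are joined here
//   lt::LinearizabilityTester<lt::state::Atomic> tester{log.info()};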
/************* Models for sequential abstract data types *************/

class FlexibleBitset
{
public:
  typedef Bitset::Pos Pos;

private:
  Bitset m_bitset;

  void allocate_blocks_if_necessary(Pos pos) noexcept
  {
    if (pos < Bitset::s_bits_per_block)
      return;

    assert(0U < pos);
    Bitset::BlockIndex new_size{Bitset::blocks_size(pos)};
    if (m_bitset.m_blocks.size() < new_size)
      m_bitset.m_blocks.resize(new_size);
  }

public:
  FlexibleBitset()
  : m_bitset{1U} {}

  FlexibleBitset(Pos max_pos)
  : m_bitset{max_pos} {}

  bool is_empty() const noexcept
  {
    return m_bitset.is_empty();
  }

  bool set(Pos pos)
  {
    allocate_blocks_if_necessary(pos);
    return m_bitset.set(pos);
  }

  bool is_set(Pos pos) const
  {
    return m_bitset.is_set(pos);
  }

  bool reset(Pos pos)
  {
    allocate_blocks_if_necessary(pos);
    return m_bitset.reset(pos);
  }

  /// Same size and bits?
  bool operator==(const FlexibleBitset& other) const noexcept
  {
    return m_bitset == other.m_bitset;
  }

  bool operator!=(const FlexibleBitset& other) const noexcept
  {
    return m_bitset != other.m_bitset;
  }

  std::size_t hash_code() const noexcept
  {
    return m_bitset.m_hash;
  }
};

namespace state
{

namespace internal
{
  template<class S, class Ret>
  struct RetOp : public Op<S>
  {
    typedef RetOp<S, Ret> Base;

    const Ret ret;

    RetOp(Ret r)
    : Op<S>(), ret{r} {}

    RetOp(Ret r, unsigned partition)
    : Op<S>(partition), ret{r} {}

#ifdef _LT_DEBUG_
    std::ostream& print(std::ostream& os) const override
    {
      return os << "ret: " << ret;
    }
#endif
  };

  template<class S, const char* op_name>
  struct ZeroArgOp : public Op<S>
  {
    typedef ZeroArgOp<S, op_name> Base;

    ZeroArgOp()
    : Op<S>() {}

#ifdef _LT_DEBUG_
    std::ostream& print(std::ostream& os) const override
    {
      return os << op_name << "()";
    }
#endif
  };

  template<class S, class Value, const char* op_name>
  struct ArgOp : public Op<S>
  {
    typedef ArgOp<S, Value, op_name> Base;

    const Value value;

    ArgOp(Value v)
    : Op<S>(v), value{v} {}

    ArgOp(bool is_partitionable, Value v)
    : Op<S>(is_partitionable, v), value{v} {}

#ifdef _LT_DEBUG_
    std::ostream& print(std::ostream& os) const override
    {
      return os << op_name << "(" << std::to_string(value) << ")";
    }
#endif
  };
}
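// These helpers factor out common operation shapes: RetOp carries a return
// value, ZeroArgOp models calls like "empty?()", and ArgOp models
// single-argument calls like "insert(5)". The single-argument ArgOp
// constructor also records the argument as the operation's partition,
// which is what makes Slicer applicable to per-key operations.
//
// A minimal sketch, assuming a hypothetical sequential type Set; the
// operation name is passed as a static constexpr character array so it can
// serve as a non-type template argument (the Atomic class below uses the
// same pattern):
//
//   class Set
//   {
//     static constexpr char s_contains_op_name[9] = "contains";
//
//     struct ContainsCallOp
//     : public internal::ArgOp<Set, char, s_contains_op_name>
//     {
//       ContainsCallOp(char v) : Base(v) {}
//       // override internal_apply() against the matching RetOp<Set, bool>
//     };
//     // ...
//   };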
/// Byte read-write register with CAS
class Atomic
{
public:
  typedef signed char Value;

private:
  static constexpr char s_read_op_name[5] = "read";
  static constexpr char s_write_op_name[6] = "write";

  struct ReadRetOp : public Op<Atomic>
  {
  private:
    const bool m_is_pending;
    const Value m_value;

  public:
    ReadRetOp(bool is_pending, Value value)
    : Op<Atomic>(),
      m_is_pending(is_pending),
      m_value{value} {}

    bool is_pending() const noexcept
    {
      return m_is_pending;
    }

    /// \pre: !is_pending()
    Value value() const
    {
      assert(!m_is_pending);
      return m_value;
    }

#ifdef _LT_DEBUG_
    std::ostream& print(std::ostream& os) const override
    {
      if (m_is_pending)
        return os << "read() : pending";

      return os << "read() : " << std::to_string(m_value);
    }
#endif
  };

  struct ReadCallOp : public internal::ZeroArgOp<Atomic, s_read_op_name>
  {
    ReadCallOp() : Base() {}

    std::pair<bool, Atomic> internal_apply(const Atomic& atomic, const Op<Atomic>& op) override
    {
      const ReadRetOp& read_ret = dynamic_cast<const ReadRetOp&>(op);

      if (read_ret.is_pending())
        return {true, atomic};

      return {atomic.get() == read_ret.value(), atomic};
    }
  };

  struct CASRetOp : public Op<Atomic>
  {
  private:
    // 0: pending, 1: failed, 2: ok
    const unsigned m_status;

  public:
    CASRetOp(unsigned status)
    : Op<Atomic>(),
      m_status(status) {}

    bool is_pending() const noexcept
    {
      return m_status == 0U;
    }

    /// \pre: !is_pending()
    bool is_ok() const
    {
      assert(0U < m_status);
      return m_status == 2U;
    }

#ifdef _LT_DEBUG_
    std::ostream& print(std::ostream& os) const override
    {
      os << "cas() : ";

      if (is_pending())
        return os << "pending";

      if (is_ok())
        return os << "succeeded";

      return os << "failed";
    }
#endif
  };

  struct CASCallOp : public Op<Atomic>
  {
    const Value current_value, new_value;

    CASCallOp(Value current_v, Value new_v)
    : Op<Atomic>(),
      current_value{current_v},
      new_value{new_v} {}

#ifdef _LT_DEBUG_
    std::ostream& print(std::ostream& os) const override
    {
      return os << "cas(" << std::to_string(current_value) << ", "
                << std::to_string(new_value) << ")";
    }
#endif

    std::pair<bool, Atomic> internal_apply(const Atomic& atomic, const Op<Atomic>& op) override
    {
      const CASRetOp& cas_ret = dynamic_cast<const CASRetOp&>(op);

      if (cas_ret.is_pending())
      {
        if (atomic.get() == current_value)
          return {true, atomic.set(new_value)};

        return {true, atomic};
      }

      if (atomic.get() == current_value)
        return {cas_ret.is_ok(), atomic.set(new_value)};

      return {!cas_ret.is_ok(), atomic};
    }
  };

  struct WriteRetOp : public Op<Atomic>
  {
    const bool is_pending;

    WriteRetOp(bool pending)
    : Op<Atomic>(),
      is_pending(pending) {}

#ifdef _LT_DEBUG_
    std::ostream& print(std::ostream& os) const override
    {
      if (is_pending)
        return os << "write() : pending";

      return os << "write() : succeeded";
    }
#endif
  };

  struct WriteCallOp : public internal::ArgOp<Atomic, Value, s_write_op_name>
  {
    typedef internal::ArgOp<Atomic, Value, s_write_op_name> Base;

    WriteCallOp(Value new_value)
    : Base(false, new_value) {}

    std::pair<bool, Atomic> internal_apply(const Atomic& atomic, const Op<Atomic>& op) override
    {
      const WriteRetOp& write_ret = dynamic_cast<const WriteRetOp&>(op);

      // we don't need to check write_ret.is_pending() because if the
      // write is pending then it could be still linearized last
      return {true, atomic.set(Base::value)};
    }
  };

  Value m_value;

  Atomic(Value value) : m_value{value} {}

public:
  typedef std::unique_ptr<Op<Atomic>> AtomicOpPtr;

  static AtomicOpPtr make_read_call()
  {
    return make_unique<ReadCallOp>();
  }

  static AtomicOpPtr make_read_ret(Value v)
  {
    return make_unique<ReadRetOp>(false, v);
  }

  static AtomicOpPtr make_read_pending()
  {
    return make_unique<ReadRetOp>(true, '\0');
  }

  static AtomicOpPtr make_write_call(Value v)
  {
    return make_unique<WriteCallOp>(v);
  }

  static AtomicOpPtr make_write_ret()
  {
    return make_unique<WriteRetOp>(false);
  }

  static AtomicOpPtr make_write_pending()
  {
    return make_unique<WriteRetOp>(true);
  }

  static AtomicOpPtr make_cas_call(Value curr_value, Value new_value)
  {
    return make_unique<CASCallOp>(curr_value, new_value);
  }

  static AtomicOpPtr make_cas_ret(bool ok)
  {
    return make_unique<CASRetOp>(1U + ok);
  }

  static AtomicOpPtr make_cas_pending()
  {
    return make_unique<CASRetOp>(0U);
  }

  /// Initially, register is negative
  Atomic() : m_value{-1} {}

  Value get() const noexcept
  {
    return m_value;
  }

  Atomic set(Value v) const noexcept
  {
    return {v};
  }

  bool operator==(const Atomic& atomic) const
  {
    return m_value == atomic.m_value;
  }

  bool operator!=(const Atomic& atomic) const
  {
    return m_value != atomic.m_value;
  }
};

constexpr char Atomic::s_read_op_name[];
constexpr char Atomic::s_write_op_name[];

template<>
struct Hash<Atomic>
{
  std::size_t operator()(const Atomic& atomic) const noexcept
  {
    return atomic.get() * 193U;
  }
};

}

}

#endif