swmr_spin_lock.cpp
#include "pls/internal/base/swmr_spin_lock.h"
#include "pls/internal/base/system_details.h"

namespace pls {
namespace internal {
namespace base {
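
// A minimal sketch of the members this file assumes the class declares; the
// actual declaration lives in pls/internal/base/swmr_spin_lock.h and may differ:
//
//   class swmr_spin_lock {
//     std::atomic<int> readers_{0};        // readers currently inside the region
//     std::atomic<int> write_request_{0};  // 1 while a writer wants or holds the lock
//    public:
//     bool reader_try_lock();
//     void reader_unlock();
//     void writer_lock();
//     void writer_unlock();
//   };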

bool swmr_spin_lock::reader_try_lock() {
  PROFILE_LOCK("Try Acquire Read Lock")
  if (write_request_.load(std::memory_order_relaxed) == 1) {
    return false;
  }
  // We think we can enter the region, so announce ourselves as a reader
  readers_.fetch_add(1, std::memory_order_acquire);
  if (write_request_.load() == 1) {
    // Whoops, a writer requested the lock in the meantime, so we back off again
    readers_--;
    return false;
  }

  return true;
}

void swmr_spin_lock::reader_unlock() {
  PROFILE_LOCK("Release Read Lock")
  readers_.fetch_add(-1, std::memory_order_release);
}

void swmr_spin_lock::writer_lock() {
  PROFILE_LOCK("Acquire Write Lock")
  // Tell the readers that we would like to write
  int expected;
  while (true) {
    expected = 0;
    if (write_request_.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
      break;
    }
    system_details::relax_cpu(); // Spin until WE set the write lock flag
  }

  // Wait for all readers to exit the critical section
  while (readers_.load() > 0)
    system_details::relax_cpu(); // Spin; the repeated atomic load is cheap
}
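
// Note on writer_lock: it first raises write_request_ and only then waits for
// the reader count to drain; readers that observe the raised flag in
// reader_try_lock back off instead of entering, so the writer runs alone once
// the existing readers have left.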

void swmr_spin_lock::writer_unlock() {
  PROFILE_LOCK("Release Write Lock")
  write_request_.store(0, std::memory_order_release);
}

}  // namespace base
}  // namespace internal
}  // namespace pls
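
// Usage sketch (illustrative only; `lock` and the surrounding calls are
// hypothetical, not part of this file):
//
//   pls::internal::base::swmr_spin_lock lock;
//
//   // Reader side: reader_try_lock() may fail while a writer is active.
//   if (lock.reader_try_lock()) {
//     // ... read shared state ...
//     lock.reader_unlock();
//   }
//
//   // Writer side (a single writer by contract, hence "swmr"):
//   lock.writer_lock();
//   // ... modify shared state ...
//   lock.writer_unlock();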