/*
 * Copyright (c) 2014-2016, Siemens AG. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <embb/base/c/mutex.h>
#include <embb/base/c/thread.h>
#include <assert.h>
#include <errno.h>

#include <embb/base/c/internal/unused.h>

#ifdef EMBB_PLATFORM_THREADING_WINTHREADS

int embb_mutex_init(embb_mutex_t* mutex, int type) {
  /* Critical sections in Windows are always recursive */
  InitializeCriticalSection(mutex);
  EMBB_UNUSED(type);
  return EMBB_SUCCESS;
}

int embb_mutex_lock(embb_mutex_t* mutex) {
  EnterCriticalSection(mutex);
  return EMBB_SUCCESS;
}

int embb_mutex_try_lock(embb_mutex_t* mutex) {
  BOOL success;
  success = TryEnterCriticalSection(mutex);
  if (success == FALSE) return EMBB_ERROR;
  return EMBB_SUCCESS;
}

int embb_mutex_unlock(embb_mutex_t* mutex) {
  LeaveCriticalSection(mutex);
  return EMBB_SUCCESS;
}

void embb_mutex_destroy(embb_mutex_t* mutex) {
  DeleteCriticalSection(mutex);
}

#endif /* EMBB_PLATFORM_THREADING_WINTHREADS */

#ifdef EMBB_PLATFORM_THREADING_POSIXTHREADS

int embb_mutex_init(embb_mutex_t* mutex, int type) {
  if (type == EMBB_MUTEX_PLAIN) {
    if (pthread_mutex_init(mutex, NULL) != 0) return EMBB_ERROR;
  } else {
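    // A recursive mutex requires an explicit attribute object with
    // PTHREAD_MUTEX_RECURSIVE; a plain mutex uses the default attributes.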
    assert(type == EMBB_MUTEX_RECURSIVE);
    pthread_mutexattr_t attributes;
    if (pthread_mutexattr_init(&attributes) != 0) return EMBB_ERROR;
    if (pthread_mutexattr_settype(&attributes, PTHREAD_MUTEX_RECURSIVE) != 0) {
      pthread_mutexattr_destroy(&attributes);
      return EMBB_ERROR;
    }
    if (pthread_mutex_init(mutex, &attributes) != 0) {
      pthread_mutexattr_destroy(&attributes);
      return EMBB_ERROR;
    }
    if (pthread_mutexattr_destroy(&attributes) != 0) return EMBB_ERROR;
  }
  return EMBB_SUCCESS;
}

int embb_mutex_lock(embb_mutex_t* mutex) {
  int result = pthread_mutex_lock(mutex);
  if (result != 0) {
    return EMBB_ERROR;
  }
  return EMBB_SUCCESS;
}

int embb_mutex_try_lock(embb_mutex_t* mutex) {
  int result = pthread_mutex_trylock(mutex);
  if (result == 0) {
    return EMBB_SUCCESS;
  }
  if (result == EBUSY) {
    return EMBB_BUSY;
  }
  return EMBB_ERROR;
}

int embb_mutex_unlock(embb_mutex_t* mutex) {
  int result = pthread_mutex_unlock(mutex);
  if (result != 0) {
    return EMBB_ERROR;
  }
  return EMBB_SUCCESS;
}

void embb_mutex_destroy(embb_mutex_t* mutex) {
  pthread_mutex_destroy(mutex);
}

#endif /* EMBB_PLATFORM_THREADING_POSIXTHREADS */
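
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * initializing and using a recursive mutex. All names come from the EMBB
 * mutex API implemented above.
 *
 *   embb_mutex_t mutex;
 *   if (embb_mutex_init(&mutex, EMBB_MUTEX_RECURSIVE) == EMBB_SUCCESS) {
 *     embb_mutex_lock(&mutex);
 *     embb_mutex_lock(&mutex);    // recursive acquisition is allowed
 *     // ... critical section ...
 *     embb_mutex_unlock(&mutex);
 *     embb_mutex_unlock(&mutex);
 *     embb_mutex_destroy(&mutex);
 *   }
 */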

int embb_spin_init(embb_spinlock_t* spinlock) {
  // For now, store the initial value directly. In the future this will use
  // an atomic init function (as soon as one is available).
  embb_atomic_store_int(&spinlock->atomic_spin_variable_, 0);
  return EMBB_SUCCESS;
}

int embb_spin_lock(embb_spinlock_t* spinlock) {
  int expected = 0;
  int spins = 1;

  // Try to atomically swap the spin variable from 0 (unlocked) to 1 (locked).
  while (0 == embb_atomic_compare_and_swap_int(
    &spinlock->atomic_spin_variable_, &expected, 1)) {
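    // Lock is contended: back off by yielding the thread after every
    // 1024 unsuccessful attempts instead of spinning continuously.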
    if (0 == (spins & 1023)) {
      embb_thread_yield();
    }
    spins++;
    // Reset expected, as a failed CAS overwrites it with the observed value.
    expected = 0;
  }
  return EMBB_SUCCESS;
}

int embb_spin_try_lock(embb_spinlock_t* spinlock,
  unsigned int max_number_spins) {
  if (max_number_spins == 0)
    return EMBB_BUSY;

  int expected = 0;
  while (0 == embb_atomic_compare_and_swap_int(
    &spinlock->atomic_spin_variable_,
    &expected, 1)) {
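    // Bounded wait: give up and report EMBB_BUSY once the spin budget
    // is exhausted.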
    max_number_spins--;
    if (0 == max_number_spins) {
      return EMBB_BUSY;
    }
    expected = 0;
  }

  return EMBB_SUCCESS;
}

int embb_spin_unlock(embb_spinlock_t* spinlock) {
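  // Release by swapping 1 -> 0; a failed swap means the lock was not held.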
  int expected = 1;
  return embb_atomic_compare_and_swap_int(&spinlock->atomic_spin_variable_,
    &expected, 0) ?
    EMBB_SUCCESS : EMBB_ERROR;
}

void embb_spin_destroy(embb_spinlock_t* spinlock) {
  // For now, nothing to do here. In the future, this will call the
  // respective destroy function for atomics.
  EMBB_UNUSED(spinlock);
}
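
/*
 * Usage sketch (illustrative only): bounded acquisition with
 * embb_spin_try_lock, falling back to the blocking embb_spin_lock. The
 * spin budget of 1000 is an arbitrary example value.
 *
 *   embb_spinlock_t lock;
 *   embb_spin_init(&lock);
 *   if (embb_spin_try_lock(&lock, 1000) != EMBB_SUCCESS) {
 *     embb_spin_lock(&lock);    // spins (yielding periodically) until acquired
 *   }
 *   // ... critical section ...
 *   embb_spin_unlock(&lock);
 *   embb_spin_destroy(&lock);
 */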