diff --git a/TESTS/mbed_platform/atomic/main.cpp b/TESTS/mbed_platform/atomic/main.cpp index 3a2f99d..62d7179 100644 --- a/TESTS/mbed_platform/atomic/main.cpp +++ b/TESTS/mbed_platform/atomic/main.cpp @@ -26,69 +26,84 @@ using utest::v1::Case; - namespace { /* Lock-free operations will be much faster - keep runtime down */ -#if MBED_ATOMIC_INT_LOCK_FREE -#define ADD_ITERATIONS (SystemCoreClock / 1000) -#else -#define ADD_ITERATIONS (SystemCoreClock / 8000) -#endif +#define ADD_UNLOCKED_ITERATIONS (SystemCoreClock / 1000) +#define ADD_LOCKED_ITERATIONS (SystemCoreClock / 8000) -template -void add_incrementer(T *ptr) +template +static inline long add_iterations(A &a) { - for (long i = ADD_ITERATIONS; i > 0; i--) { - core_util_atomic_fetch_add(ptr, T(1)); - } + return a.is_lock_free() ? ADD_UNLOCKED_ITERATIONS : ADD_LOCKED_ITERATIONS; } -template -void add_release_incrementer(T *ptr) -{ - for (long i = ADD_ITERATIONS; i > 0; i--) { - core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release); +template +struct add_incrementer { + static void op(A *ptr) + { + for (long i = add_iterations(*ptr); i > 0; i--) { + ++(*ptr); + } } -} +}; -template -void sub_incrementer(T *ptr) -{ - for (long i = ADD_ITERATIONS; i > 0; i--) { - core_util_atomic_fetch_sub(ptr, T(-1)); +template +struct add_release_incrementer { + static void op(A *ptr) + { + for (long i = add_iterations(*ptr); i > 0; i--) { + ptr->fetch_add(1, mbed::memory_order_release); + } } -} +}; -template -void bitops_incrementer(T *ptr) -{ - for (long i = ADD_ITERATIONS; i > 0; i--) { - core_util_atomic_fetch_add(ptr, T(1)); - core_util_atomic_fetch_and(ptr, T(-1)); - core_util_atomic_fetch_or(ptr, T(0)); +template +struct sub_incrementer { + static void op(A *ptr) + { + for (long i = add_iterations(*ptr); i > 0; i--) { + ptr->fetch_sub(-1); + } } -} +}; -template -void weak_incrementer(T *ptr) -{ - for (long i = ADD_ITERATIONS; i > 0; i--) { - T val = core_util_atomic_load(ptr); - do { - } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1))); +template +struct bitops_incrementer { + static void op(A *ptr) + { + for (long i = add_iterations(*ptr); i > 0; i--) { + (*ptr) += 1; + (*ptr) &= -1; + (*ptr) |= 0; + } } -} +}; -template -void strong_incrementer(T *ptr) -{ - for (long i = ADD_ITERATIONS; i > 0; i--) { - T val = core_util_atomic_load(ptr); - do { - } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1))); +template +struct weak_incrementer { + static void op(A *ptr) + { + for (long i = add_iterations(*ptr); i > 0; i--) { + typename A::value_type val = ptr->load(); + do { + } while (!ptr->compare_exchange_weak(val, val + 1)); + } } -} +}; + +template +struct strong_incrementer { + static void op(A *ptr) + { + for (long i = add_iterations(*ptr); i > 0; i--) { + typename A::value_type val = ptr->load(); + do { + } while (!ptr->compare_exchange_strong(val, val + 1)); + } + } +}; + /* @@ -100,32 +115,34 @@ * Using core_util_atomic_ templates, and exercising * load and store briefly. 
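 *
 * Each Case at the end of this file instantiates this template with an
 * integer type and one of the incrementer functors above, e.g. (an
 * illustrative form only; the exact arguments are those in the Case table):
 *
 *   test_atomic_add<uint8_t, add_incrementer>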
*/ -template +template class Fn> void test_atomic_add() { struct { volatile T nonatomic1; - T atomic1; - T atomic2; + Atomic atomic1; + volatile Atomic atomic2; // use volatile just to exercise the templates' volatile methods volatile T nonatomic2; - } data; + } data = { 0, { 0 }, { 1 }, 0 }; // test initialisation - data.nonatomic1 = 0; - core_util_atomic_store(&data.atomic1, T(0)); - core_util_atomic_store(&data.atomic2, T(0)); - data.nonatomic2 = 0; + TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1); + TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1); + TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data); + + // test store + data.atomic2 = 0; Thread t1(osPriorityNormal, THREAD_STACK); Thread t2(osPriorityNormal, THREAD_STACK); Thread t3(osPriorityNormal, THREAD_STACK); Thread t4(osPriorityNormal, THREAD_STACK); - TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn, &data.atomic1))); - TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn, &data.atomic1))); - TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn, &data.atomic2))); - TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn, &data.atomic2))); + TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn::op, &data.atomic1))); + TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn::op, &data.atomic1))); + TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn::op, &data.atomic2))); + TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn::op, &data.atomic2))); - for (long i = ADD_ITERATIONS; i > 0; i--) { + for (long i = ADD_UNLOCKED_ITERATIONS; i > 0; i--) { data.nonatomic1++; data.nonatomic2++; } @@ -135,10 +152,83 @@ t3.join(); t4.join(); - TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1); - TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1)); - TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2)); - TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2); + TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic1); + TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic1)), data.atomic1); + TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic2)), data.atomic2); + TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic2); +} + +// This should fit into a uint32_t container, and there +// will be 1 byte of padding to ignore. +struct small { + uint8_t a; + uint8_t b; + uint8_t c; +}; + +// An 11-byte weird structure. Should work with critical sections. 
+struct large { + uint8_t a; + uint8_t b; + uint8_t c; + uint8_t dummy[8]; +}; + +template +void struct_incrementer_a(A *data) +{ + for (long i = add_iterations(*data); i > 0; i--) { + typename A::value_type curval = *data, newval; + do { + newval = curval; + newval.a++; + } while (!data->compare_exchange_weak(curval, newval)); + } +} + +template +void struct_incrementer_b(A *data) +{ + for (long i = add_iterations(*data); i > 0; i--) { + typename A::value_type curval = *data, newval; + do { + newval = curval; + newval.b++; + } while (!data->compare_exchange_weak(curval, newval)); + } +} + +template +void test_atomic_struct() +{ + TEST_ASSERT_EQUAL(N, sizeof(Atomic)); + + // Small structures don't have value constructor implemented; + Atomic data; + atomic_init(&data, T{0, 0, 0}); + + Thread t1(osPriorityNormal, THREAD_STACK); + Thread t2(osPriorityNormal, THREAD_STACK); + + TEST_ASSERT_EQUAL(osOK, t1.start(callback(struct_incrementer_a >, &data))); + TEST_ASSERT_EQUAL(osOK, t2.start(callback(struct_incrementer_b >, &data))); + + for (long i = add_iterations(data); i > 0; i--) { + T curval = data, newval; + do { + newval = curval; + newval.c++; + } while (!data.compare_exchange_weak(curval, newval)); + } + + t1.join(); + t2.join(); + + T final_val = data; + + TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.a); + TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.b); + TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.c); } } // namespace @@ -174,7 +264,9 @@ Case("Test atomic compare exchange strong 8-bit", test_atomic_add), Case("Test atomic compare exchange strong 16-bit", test_atomic_add), Case("Test atomic compare exchange strong 32-bit", test_atomic_add), - Case("Test atomic compare exchange strong 64-bit", test_atomic_add) + Case("Test atomic compare exchange strong 64-bit", test_atomic_add), + Case("Test small atomic custom structure", test_atomic_struct), + Case("Test large atomic custom structure", test_atomic_struct) }; utest::v1::Specification specification(test_setup, cases); diff --git a/UNITTESTS/CMakeLists.txt b/UNITTESTS/CMakeLists.txt index 95806c5..4116426 100644 --- a/UNITTESTS/CMakeLists.txt +++ b/UNITTESTS/CMakeLists.txt @@ -9,10 +9,10 @@ macro(use_cxx14) if (CMAKE_VERSION VERSION_LESS 3.1) if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++98") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++14") endif() else() - set(CMAKE_CXX_STANDARD 98) + set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) endif() endmacro() diff --git a/UNITTESTS/stubs/ATHandler_stub.cpp b/UNITTESTS/stubs/ATHandler_stub.cpp index 8f4038f..b69fe50 100644 --- a/UNITTESTS/stubs/ATHandler_stub.cpp +++ b/UNITTESTS/stubs/ATHandler_stub.cpp @@ -55,7 +55,7 @@ bool ATHandler_stub::process_oob_urc = false; int ATHandler_stub::read_string_index = kRead_string_table_size; -const char *ATHandler_stub::read_string_table[kRead_string_table_size] = {'\0'}; +const char *ATHandler_stub::read_string_table[kRead_string_table_size]; int ATHandler_stub::resp_stop_success_count = kResp_stop_count_default; bool ATHandler_stub::get_debug_flag = false; diff --git a/features/cellular/framework/AT/AT_CellularSMS.cpp b/features/cellular/framework/AT/AT_CellularSMS.cpp index dd30b4d..aae8ab5 100644 --- a/features/cellular/framework/AT/AT_CellularSMS.cpp +++ b/features/cellular/framework/AT/AT_CellularSMS.cpp @@ -41,7 +41,7 @@ #define NVAM '?' 
// Not Valid ascii, ISO-8859-1 mark // mapping table from 7-bit GSM to ascii (ISO-8859-1) -static const char gsm_to_ascii[] = { +static const unsigned char gsm_to_ascii[] = { 64, // 0 163, // 1 36, // 2 @@ -1153,7 +1153,7 @@ char *gsm_str = new char[len]; for (uint16_t y = 0; y < len; y++) { for (int x = 0; x < GSM_TO_ASCII_TABLE_SIZE; x++) { - if (gsm_to_ascii[x] == str[y]) { + if (gsm_to_ascii[x] == static_cast(str[y])) { gsm_str[y] = x; } } diff --git a/mbed.h b/mbed.h index ea99653..e59813d 100644 --- a/mbed.h +++ b/mbed.h @@ -88,7 +88,7 @@ #include "drivers/InterruptIn.h" #include "platform/mbed_wait_api.h" #include "hal/sleep_api.h" -#include "platform/mbed_atomic.h" +#include "platform/Atomic.h" #include "platform/mbed_power_mgmt.h" #include "platform/mbed_rtc_time.h" #include "platform/mbed_poll.h" diff --git a/platform/Atomic.h b/platform/Atomic.h new file mode 100644 index 0000000..ed22b93 --- /dev/null +++ b/platform/Atomic.h @@ -0,0 +1,1236 @@ +/* + * Copyright (c) 2017 ARM Limited + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MBED_ATOMIC_H +#define MBED_ATOMIC_H + +#include +#include +#include +#include "platform/mbed_assert.h" +#include "platform/mbed_atomic.h" +#include "platform/mbed_critical.h" +#include "platform/mbed_cxxsupport.h" +#include "platform/CriticalSectionLock.h" + +/* + * Atomic template and types are designed to be as close as possible to C++11 + * std::atomic. Key differences: + * + * - Operations are specified as atomic with respect to interrupts as well as + * threads + * - "Lock-free" indicates that a critical section is used, otherwise + * exclusive accesses. + * - Default initialization follows C17 and proposed C++2x rules - ie that + * like normal objects they are zero-initialized if static or thread-local, + * else in an indeterminate state when automatic. There is no ATOMIC_VAR_INIT() + * equivalent. + */ + +#ifndef MBED_EXCLUSIVE_ACCESS +#define MBED_ATOMIC_BOOL_LOCK_FREE 0 +#define MBED_ATOMIC_CHAR_LOCK_FREE 0 +#define MBED_ATOMIC_CHAR16_T_LOCK_FREE 0 +#define MBED_ATOMIC_CHAR32_T_LOCK_FREE 0 +#define MBED_ATOMIC_WCHAR_T_LOCK_FREE 0 +#define MBED_ATOMIC_SHORT_LOCK_FREE 0 +#define MBED_ATOMIC_INT_LOCK_FREE 0 +#define MBED_ATOMIC_LONG_LOCK_FREE 0 +#define MBED_ATOMIC_LLONG_LOCK_FREE 0 +#define MBED_ATOMIC_POINTER_LOCK_FREE 0 +#else +#define MBED_ATOMIC_BOOL_LOCK_FREE 2 +#define MBED_ATOMIC_CHAR_LOCK_FREE 2 +#define MBED_ATOMIC_CHAR16_T_LOCK_FREE 2 +#define MBED_ATOMIC_CHAR32_T_LOCK_FREE 2 +#define MBED_ATOMIC_WCHAR_T_LOCK_FREE 2 +#define MBED_ATOMIC_SHORT_LOCK_FREE 2 +#define MBED_ATOMIC_INT_LOCK_FREE 2 +#define MBED_ATOMIC_LONG_LOCK_FREE 2 +#define MBED_ATOMIC_LLONG_LOCK_FREE 0 +#define MBED_ATOMIC_POINTER_LOCK_FREE 2 +#endif + +namespace mbed { + +/** Atomic template + * + * `mbed::Atomic` is intended to work as per C++14 `std::atomic`. `T` must be a + * _TriviallyCopyable_, _CopyConstructible_ and _CopyAssignable_ type. 
+ * - All standard methods of `std::atomic` are supplied: + * + For any `T`: `load`, `store`, `exchange`, `compare_exchange_weak`, + * `compare_exchange_strong`, `operator T`, `operator=(T)`, `Atomic(T)`; + * + For integers and pointers: `++`, `+=`, `--`, `-=`, `fetch_add`, `fetch_sub`; + * + For integers: `&=`, `|=`, `^=`, `fetch_and`, `fetch_or`, `fetch_xor`. + * - Operations are guaranteed atomic with respect to interrupts, and the + * operations can be used in interrupts - `std::atomic` implementations don't specify this. + * - Implementation is optimized for uniprocessor use (no DMB instructions), + * unlike typical `std::atomic` implementations. + * - Lock-free versions (LDREX/STREX) are used for user types if small enough and available, + * otherwise critical sections are used. + * - If used with large objects, interrupt latency may be impacted. + * - Valid initialisation forms are: + * + `Atomic foo;` (zero initialized if static or thread-local, else value indeterminate) + * + `atomic_init(&foo, 2);` (initialize a default-initialized variable, once only, not atomic) + * + `Atomic foo(2);` (value initialization) + * + `Atomic foo = { 2 };` (also legal C11 with _Atomic int) + * + `Atomic foo = 2;` (C++17 or later only - also legal C11 with _Atomic int) + * Note that the lack of a copy constructor limits the simple-looking assignment initialization + * to C++17 or later only. + * - The value constructor is not available for small custom types. + * - `MBED_ATOMIC_XXX_LOCK_FREE` replaces `ATOMIC_XXX_LOCK_FREE` - "locking" forms + * take a critical section, non-locking do not. + * - For `bool`, integer types and pointers, storage is compatible with the + * plain types. If necessary, they can be substituted as plain types for C + * compatibility in headers, and accessed using core_util_atomic functions. + * @code + * struct foo { + * #ifdef __cplusplus + * mbed::atomic_uint32_t counter; // Use C++ templates from C++ code + * #else + * uint32_t counter; // Could use core_util_atomic_xxx_u32 from C code, or just have this for structure layout. + * #endif + * }; + * @endcode + */ +template +class Atomic; + +/* Pull C enum from mbed_critical.h into mbed namespace */ +using memory_order = ::mbed_memory_order; +constexpr memory_order memory_order_relaxed = mbed_memory_order_relaxed; +constexpr memory_order memory_order_consume = mbed_memory_order_consume; +constexpr memory_order memory_order_acquire = mbed_memory_order_acquire; +constexpr memory_order memory_order_release = mbed_memory_order_release; +constexpr memory_order memory_order_acq_rel = mbed_memory_order_acq_rel; +constexpr memory_order memory_order_seq_cst = mbed_memory_order_seq_cst; + +namespace impl { + +/* For types up to uint64_t size, we use the mbed_critical.h functions with + * uintX_t containers. Otherwise, we do it ourselves, with no special alignment. 
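+ *
+ * For example (a sketch, mirroring the structures used by the atomic tests):
+ * a 3-byte struct is carried in a uint32_t container with one byte of padding
+ * and can take the lock-free paths, whereas an 11-byte struct exceeds uint64_t
+ * and falls back to the critical-section AtomicBaseRaw implementation below.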
+ */ +// *INDENT-OFF* +template +using atomic_container = std::conditional >>>; +// *INDENT-ON* + +template +using atomic_container_t = typename atomic_container::type; + +template +struct atomic_container_is_lock_free; + +template<> +struct atomic_container_is_lock_free : mbed::bool_constant { }; + +template<> +struct atomic_container_is_lock_free : mbed::bool_constant { }; + +template<> +struct atomic_container_is_lock_free : mbed::bool_constant { }; + +template<> +struct atomic_container_is_lock_free : mbed::bool_constant { }; + +template +using atomic_is_lock_free = atomic_container_is_lock_free>; + +/* If one order is given for compare_exchange, it's reduced for failure case that doesn't store */ +MBED_FORCEINLINE constexpr memory_order memorder_for_failure(memory_order order) +{ + return order == memory_order_acq_rel ? memory_order_acquire : + order == memory_order_release ? memory_order_relaxed : order; +} + +/* Base template for a raw Atomic (arbitrary type T), using atomic storage size N. + * This generic implementation uses critical sections and has no alignment requirements. + * There are specialisations for smaller sizes below. + */ +template +struct AtomicBaseRaw { + using value_type = T; + AtomicBaseRaw() noexcept = default; + constexpr AtomicBaseRaw(T v) noexcept : data(std::move(v)) + { + } + bool is_lock_free() const volatile noexcept + { + return false; + } + T load(memory_order order = memory_order_seq_cst) const volatile noexcept + { + MBED_CHECK_LOAD_ORDER(order); + // Cope with T not having default constructor + union { + char c[sizeof(T)]; + T val; + } ret; + { + CriticalSectionLock lock; + memcpy(std::addressof(ret.val), const_cast(std::addressof(data)), sizeof(T)); + } + return std::move(ret.val); + } + T load(memory_order order = memory_order_seq_cst) const noexcept + { + MBED_CHECK_LOAD_ORDER(order); + CriticalSectionLock lock; + return data; + } + void store(T desired, memory_order order = memory_order_seq_cst) volatile noexcept + { + MBED_CHECK_STORE_ORDER(order); + CriticalSectionLock lock; + memcpy(const_cast(std::addressof(data)), std::addressof(desired), sizeof(T)); + } + void store(T desired, memory_order order = memory_order_seq_cst) noexcept + { + MBED_CHECK_STORE_ORDER(order); + CriticalSectionLock lock; + data = std::move(desired); // MoveAssignable + } + T exchange(T desired, memory_order = memory_order_seq_cst) volatile noexcept + { + // Cope with T not having default constructor + union { + char c[sizeof(T)]; + T val; + } old; + { + CriticalSectionLock lock; + memcpy(std::addressof(old.val), const_cast(std::addressof(data)), sizeof(T)); + memcpy(const_cast(std::addressof(data)), std::addressof(desired), sizeof(T)); + } + return old.val; + } + T exchange(T desired, memory_order = memory_order_seq_cst) noexcept + { + CriticalSectionLock lock; + T old = std::move(data); // MoveConstructible + data = std::move(desired); // MoveAssignable + return old; + } + bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept + { + MBED_CHECK_CAS_ORDER(success, failure); + CriticalSectionLock lock; + if (memcmp(const_cast(std::addressof(data)), std::addressof(expected), sizeof(T)) == 0) { + memcpy(const_cast(std::addressof(data)), std::addressof(desired), sizeof(T)); + return true; + } else { + memcpy(std::addressof(expected), const_cast(std::addressof(data)), sizeof(T)); + return false; + } + } + bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure) noexcept + { + 
MBED_CHECK_CAS_ORDER(success, failure); + CriticalSectionLock lock; + if (memcmp(std::addressof(data), std::addressof(expected), sizeof(T)) == 0) { + data = std::move(desired); // MoveAssignable + return true; + } else { + expected = data; // CopyAssignable + return false; + } + } + bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept + { + return compare_exchange_strong(expected, desired, success, failure); + } + bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure) noexcept + { + return compare_exchange_strong(expected, desired, success, failure); + } + bool compare_exchange_strong(T &expected, T desired, memory_order order = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, memorder_for_failure(order)); + } + bool compare_exchange_strong(T &expected, T desired, memory_order order = memory_order_seq_cst) noexcept + { + return compare_exchange_strong(expected, desired, order, memorder_for_failure(order)); + } + bool compare_exchange_weak(T &expected, T desired, memory_order order = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_weak(expected, desired, order, memorder_for_failure(order)); + } + bool compare_exchange_weak(T &expected, T desired, memory_order order = memory_order_seq_cst) noexcept + { + return compare_exchange_weak(expected, desired, order, memorder_for_failure(order)); + } +protected: + union { + // Having the union makes us just get zero-initialised, as per std::atomic, or our specializations, + // rather than actually running T's default constructor. + char dummy_for_zero_init; + T data; + }; + void init(T desired) volatile noexcept + { + memcpy(const_cast(std::addressof(data)), std::addressof(desired), sizeof(T)); + } + void init(T desired) noexcept + { + data = std::move(desired); + } +}; + +template +struct AtomicSmallStoragePadded { + union { + A u; + struct { + T data; + char pad[N]; + }; + }; + AtomicSmallStoragePadded() noexcept = default; + constexpr AtomicSmallStoragePadded(T v) noexcept : data(std::move(v)), pad{0} + { + } + constexpr AtomicSmallStoragePadded(A v) noexcept : u(v) + { + } +}; + +template +struct AtomicSmallStorageUnpadded { + union { + A u; + T data; + }; + AtomicSmallStorageUnpadded() noexcept = default; + constexpr AtomicSmallStorageUnpadded(T v) noexcept : data(std::move(v)) + { + } + constexpr AtomicSmallStorageUnpadded(A v) noexcept : u(v) + { + } +}; + +// *INDENT-OFF* +template> +using AtomicSmallStorage = std::conditional_t, + AtomicSmallStoragePadded>; +// *INDENT-ON* + +/* Base implementation specialisation for arbitrary small type T of size N, + * using corresponding atomic_xxx functions acting on uintX_t data type A of that size. + * This does involve type punning on a union, so isn't strictly legal, but it's no worse than + * what has always been done with the pointer atomics. + * Always pad when necessary so that compare-exchange works. + * + * It's only worth using the specific small form if there is a real lock-free implementation. + * Otherwise the overhead of shuffling in and out of the integer container is larger than just + * doing it directly. 
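+ *
+ * Layout sketch for a 3-byte T carried in a 4-byte container (illustrative only):
+ *
+ *   union {
+ *       uint32_t u;                       // whole-container view for the atomic ops
+ *       struct { T data; char pad[1]; };  // value view plus zero-initialised padding
+ *   };
+ *
+ * Zero-initialising the padding keeps whole-container compare-exchange from
+ * spuriously failing on indeterminate pad bytes.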
+ */ +template +class AtomicBaseRaw::value>> { + AtomicSmallStorage storage; +public: + using value_type = T; + AtomicBaseRaw() noexcept = default; + constexpr AtomicBaseRaw(T v) : storage(std::move(v)) { } + bool is_lock_free() const volatile noexcept + { + return atomic_is_lock_free::value; + } + T load() const volatile noexcept + { + AtomicSmallStorage loaded(core_util_atomic_load(&storage.u)); + return loaded.data; + } + T load(memory_order order) const volatile noexcept + { + AtomicSmallStorage loaded(core_util_atomic_load_explicit(&storage.u, order)); + return loaded.data; + } + T load() const noexcept + { + AtomicSmallStorage loaded(core_util_atomic_load(&storage.u)); + return loaded.data; + } + T load(memory_order order) const noexcept + { + AtomicSmallStorage loaded(core_util_atomic_load_explicit(&storage.u, order)); + return loaded.data; + } + void store(T desired) volatile noexcept + { + AtomicSmallStorage tostore(desired); + core_util_atomic_store(&storage.u, tostore.u); + } + void store(T desired, memory_order order) volatile noexcept + { + AtomicSmallStorage tostore(desired); + core_util_atomic_store_explicit(&storage.u, tostore.u, order); + } + void store(T desired) noexcept + { + AtomicSmallStorage tostore(desired); + core_util_atomic_store(&storage.u, tostore.u); + } + void store(T desired, memory_order order) noexcept + { + AtomicSmallStorage tostore(desired); + core_util_atomic_store_explicit(&storage.u, tostore.u, order); + } + T exchange(T desired, memory_order = memory_order_seq_cst) volatile noexcept + { + AtomicSmallStorage exchanged(desired); + exchanged.u = core_util_atomic_exchange(&storage.u, exchanged.u); + return exchanged.data; + } + bool compare_exchange_strong(T &expected, T desired) volatile noexcept + { + AtomicSmallStorage expcur(expected); + AtomicSmallStorage tostore(desired); + bool result = core_util_atomic_compare_exchange_strong(&storage.u, &expcur.u, tostore.u); + if (!result) { + expected = expcur.data; + } + return result; + } + bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept + { + AtomicSmallStorage expcur(expected); + AtomicSmallStorage tostore(desired); + bool result = core_util_atomic_compare_exchange_strong_explicit(&storage.u, &expcur.u, tostore.u, success, failure); + if (!result) { + expected = expcur.data; + } + return result; + } + bool compare_exchange_strong(T &expected, T desired, memory_order order) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, memorder_for_failure(order)); + } + bool compare_exchange_weak(T &expected, T desired) volatile noexcept + { + AtomicSmallStorage expcur(expected); + AtomicSmallStorage tostore(desired); + bool result = core_util_atomic_compare_exchange_weak(&storage.u, &expcur.u, tostore.u); + if (!result) { + expected = expcur.data; + } + return result; + } + bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept + { + AtomicSmallStorage expcur(expected); + AtomicSmallStorage tostore(desired); + bool result = core_util_atomic_compare_exchange_weak_explicit(&storage.u, &expcur.u, tostore.u, success, failure); + if (!result) { + expected = expcur.data; + } + return result; + } + bool compare_exchange_weak(T &expected, T desired, memory_order order) volatile noexcept + { + return compare_exchange_weak(expected, desired, order, memorder_for_failure(order)); + } +protected: + void init(T desired) volatile noexcept + { + AtomicSmallStorage 
tostore(desired); + memcpy(const_cast(&storage.u), &tostore.u, sizeof storage.u); + } + void init(T desired) noexcept + { + AtomicSmallStorage tostore(desired); + storage.u = std::move(tostore.u); + } +}; + +/* Template for an integer or pointer Atomic of type T, using atomic storage A + * Same functionality as AtomicBaseRaw, but can use simpler implementation using casts + * to convert between type T and the underlying storage type. Doesn't change functionality, + * compilers can generally optimise this better (will particularly help in debug build). + * C casts must be used as reinterpret_cast can't handle integer<->integer, and static_cast + * can't handle pointer<->integer. Note that + * we always have A be unsigned, so that our arithmetic is unsigned and defined on overflow. + * Compilers can and do treat signed arithmetic overflow as undefined, but not cast overflow. + * (C++20 explicitly defines assignment of unsigned to signed as 2's-complement). + * Our data field is of type T, not A, to permit value/aggregate initialisation. + */ +template> +struct AtomicBaseInt { + MBED_STRUCT_STATIC_ASSERT(sizeof(T) == sizeof(A), "AtomicBaseInt size mismatch"); + using value_type = T; + AtomicBaseInt() noexcept = default; + constexpr AtomicBaseInt(T v) noexcept : u(A(v)) + { + } + bool is_lock_free() const volatile noexcept + { + return atomic_container_is_lock_free::value; + } + T load() const volatile noexcept + { + return T(core_util_atomic_load(&u)); + } + T load(memory_order order) const volatile noexcept + { + return T(core_util_atomic_load_explicit(&u, order)); + } + T load() const noexcept + { + return T(core_util_atomic_load(&u)); + } + T load(memory_order order) const noexcept + { + return T(core_util_atomic_load_explicit(&u, order)); + } + void store(T desired) volatile noexcept + { + core_util_atomic_store(&u, A(desired)); + } + void store(T desired, memory_order order) volatile noexcept + { + core_util_atomic_store_explicit(&u, A(desired), order); + } + void store(T desired) noexcept + { + core_util_atomic_store(&u, A(desired)); + } + void store(T desired, memory_order order) noexcept + { + core_util_atomic_store_explicit(&u, A(desired), order); + } + T exchange(T desired) volatile noexcept + { + A d = A(desired); + return T(core_util_atomic_exchange(&u, d)); + } + T exchange(T desired, memory_order order) volatile noexcept + { + A d = A(desired); + return T(core_util_atomic_exchange_explicit(&u, d, order)); + } + bool compare_exchange_strong(T &expected, T desired) volatile noexcept + { + A *expcur = reinterpret_cast(&expected); + return core_util_atomic_compare_exchange_strong(&u, expcur, A(desired)); + } + bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept + { + A *expcur = reinterpret_cast(&expected); + return core_util_atomic_compare_exchange_strong_explicit(&u, expcur, A(desired), success, failure); + } + bool compare_exchange_strong(T &expected, T desired, memory_order order) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, memorder_for_failure(order)); + } + bool compare_exchange_weak(T &expected, T desired) volatile noexcept + { + A *expcur = reinterpret_cast(&expected); + return core_util_atomic_compare_exchange_weak(&u, expcur, A(desired)); + } + bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept + { + A *expcur = reinterpret_cast(&expected); + return core_util_atomic_compare_exchange_weak_explicit(&u, expcur, 
A(desired), success, failure); + } + bool compare_exchange_weak(T &expected, T desired, memory_order order) volatile noexcept + { + return compare_exchange_weak(expected, desired, order, memorder_for_failure(order)); + } +protected: + A u; + void init(T desired) volatile noexcept + { + u = A(desired); + } + void init(T desired) noexcept + { + u = A(desired); + } +}; + +/* Template for an integer or pointer Atomic, including increment and + * decrement functionality. If StrideT is void, then the increment and + * decrement operators are ill-formed, as desired for Atomic. + */ +template> +struct AtomicWithAdd : public AtomicBaseInt { + using difference_type = DiffT; +#ifdef __CC_ARM + AtomicWithAdd() noexcept = default; + constexpr AtomicWithAdd(T v) noexcept : AtomicBaseInt(v) + { + } +#else + using AtomicBaseInt::AtomicBaseInt; +#endif + T operator++() volatile noexcept + { + A d = static_cast(sizeof(StrideT)); + return T(core_util_atomic_incr(&this->u, d)); + } + T operator++(int) volatile noexcept + { + A d = static_cast(sizeof(StrideT)); + return T(core_util_atomic_fetch_add(&this->u, d)); + } + T operator--() volatile noexcept + { + A d = static_cast(sizeof(StrideT)); + return T(core_util_atomic_decr(&this->u, d)); + } + T operator--(int) volatile + { + A d = static_cast(sizeof(StrideT)); + return T(core_util_atomic_fetch_sub(&this->u, d)); + } + T fetch_add(DiffT arg) volatile + { + A d = static_cast(arg * sizeof(StrideT)); + return T(core_util_atomic_fetch_add(&this->u, d)); + } + T fetch_add(DiffT arg, memory_order order) volatile + { + A d = static_cast(arg * sizeof(StrideT)); + return T(core_util_atomic_fetch_add_explicit(&this->u, d, order)); + } + T operator+=(DiffT arg) volatile + { + A d = static_cast(arg * sizeof(StrideT)); + return T(core_util_atomic_incr(&this->u, d)); + } + T fetch_sub(DiffT arg) volatile + { + A d = static_cast(arg * sizeof(StrideT)); + return T(core_util_atomic_fetch_sub(&this->u, d)); + } + T fetch_sub(DiffT arg, memory_order order) volatile + { + A d = static_cast(arg * sizeof(StrideT)); + return T(core_util_atomic_fetch_sub_explicit(&this->u, d, order)); + } + T operator-=(DiffT arg) volatile + { + A d = static_cast(arg * sizeof(StrideT)); + return T(core_util_atomic_decr(&this->u, d)); + } +}; + +/* Template for an integer Atomic with bitwise operations + */ +template> +struct AtomicWithBitwise : public AtomicWithAdd { +#ifdef __CC_ARM + AtomicWithBitwise() noexcept = default; + constexpr AtomicWithBitwise(T v) noexcept : AtomicWithAdd(v) + { + } +#else + using AtomicWithAdd::AtomicWithAdd; +#endif + T fetch_and(T arg) volatile noexcept + { + A d = static_cast(arg); + return static_cast(core_util_atomic_fetch_and(&this->u, d)); + } + T fetch_and(T arg, memory_order order) volatile noexcept + { + A d = static_cast(arg); + return static_cast(core_util_atomic_fetch_and_explicit(&this->u, d, order)); + } + T operator&=(T arg) volatile noexcept + { + A d = static_cast(arg); + return static_cast(core_util_atomic_and_fetch(&this->u, d)); + } + T fetch_or(T arg) volatile noexcept + { + A d = static_cast(arg); + return static_cast(core_util_atomic_fetch_or(&this->u, d)); + } + T fetch_or(T arg, memory_order order) volatile noexcept + { + A d = static_cast(arg); + return static_cast(core_util_atomic_fetch_or_explicit(&this->u, d, order)); + } + T operator|=(T arg) volatile noexcept + { + A d = static_cast(arg); + return static_cast(core_util_atomic_or_fetch(&this->u, d)); + } + T fetch_xor(T arg) volatile noexcept + { + A d = static_cast(arg); + return 
static_cast(core_util_atomic_fetch_xor(&this->u, d)); + } + T fetch_xor(T arg, memory_order order) volatile noexcept + { + A d = static_cast(arg); + return static_cast(core_util_atomic_fetch_xor_explicit(&this->u, d, order)); + } + T operator^=(T arg) volatile noexcept + { + A d = static_cast(arg); + return static_cast(core_util_atomic_xor_fetch(&this->u, d)); + } +}; + +/* Selector between the implementations + * bool -> AtomicBaseInt + * other integral types -> AtomicWithBitwise + * everything else -> AtomicBaseRaw + * (Pointers are specialized in the public API) + */ +// *INDENT-OFF* +template +struct AtomicSelector : mbed::type_identity> { }; + +template +struct AtomicSelector::value>> + : mbed::type_identity> { }; + +template +struct AtomicSelector::value && !std::is_same::value>> + : mbed::type_identity> { }; +// *INDENT-ON* + +template +using Atomic = typename AtomicSelector::type; + +} // namespace impl + +template +void atomic_init(volatile Atomic *obj, typename Atomic::value_type desired) noexcept; + +template +void atomic_init(Atomic *obj, typename Atomic::value_type desired) noexcept; + +/* Base template - let impl::Atomic dispatch to raw, base integer or integer-with-bitwise */ +template +struct Atomic : public impl::Atomic { + // Constraints from LWG 3012 + static_assert(std::is_trivially_copyable::value, "Atomic types must be TriviallyCopyable"); + static_assert(std::is_copy_constructible::value, "Atomic types must be CopyConstructible"); + static_assert(std::is_move_constructible::value, "Atomic types must be MoveConstructible"); + static_assert(std::is_copy_assignable::value, "Atomic types must be CopyAssignable"); + static_assert(std::is_move_assignable::value, "Atomic types must be MoveAssignable"); + Atomic() noexcept = default; + Atomic(const Atomic &) = delete; + constexpr Atomic(T v) noexcept : impl::Atomic(std::move(v)) + { + } + operator T() const volatile noexcept + { + return this->load(); + } + operator T() const noexcept + { + return this->load(); + } + T operator=(T desired) volatile noexcept + { + this->store(desired); + return desired; + } + T operator=(T desired) noexcept + { + this->store(desired); + return desired; + } + Atomic &operator=(const Atomic &) = delete; +private: + friend void atomic_init<>(volatile Atomic *obj, typename Atomic::value_type desired) noexcept; + friend void atomic_init<>(Atomic *obj, typename Atomic::value_type desired) noexcept; +}; + + +/* Pointer specialisation - support increment and decrement by ptrdiff_t, + * as long as sizeof(T) is valid to act as the stride. Annoyingly, C++11 + * doesn't provide operator->, so neither do we, so you have to say + * "aptr.load()->member" to use it to access a structure. *aptr is fine though. 
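+ *
+ * Usage sketch (Foo, foo and member are illustrative names, not part of the API):
+ *
+ *   mbed::Atomic<Foo *> aptr;
+ *   aptr = &foo;              // atomic store via operator=
+ *   aptr.load()->member;      // no operator->, so call load() first
+ *   (*aptr).member;           // dereferencing via operator T *() works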
+ */ +template +struct Atomic : public impl::AtomicWithAdd { + Atomic() noexcept = default; + Atomic(const Atomic &) = delete; + constexpr Atomic(T *v) noexcept : impl::AtomicWithAdd(v) + { + } + operator T *() const volatile noexcept + { + return this->load(); + } + operator T *() const noexcept + { + return this->load(); + } + T *operator=(T *desired) volatile noexcept + { + this->store(desired); + return desired; + } + T *operator=(T *desired) noexcept + { + this->store(desired); + return desired; + } + Atomic &operator=(const Atomic &) = delete; +private: + friend void atomic_init<>(volatile Atomic *obj, typename Atomic::value_type desired) noexcept; + friend void atomic_init<>(Atomic *obj, typename Atomic::value_type desired) noexcept; +}; + +using atomic_bool = Atomic; +using atomic_char = Atomic; +using atomic_schar = Atomic; +using atomic_uchar = Atomic; +using atomic_char16_t = Atomic; +using atomic_char32_t = Atomic; +using atomic_wchar_t = Atomic; +using atomic_short = Atomic; +using atomic_ushort = Atomic; +using atomic_int = Atomic; +using atomic_uint = Atomic; +using atomic_long = Atomic; +using atomic_ulong = Atomic; +using atomic_llong = Atomic; +using atomic_ullong = Atomic; +using atomic_int8_t = Atomic; +using atomic_uint8_t = Atomic; +using atomic_int16_t = Atomic; +using atomic_uint16_t = Atomic; +using atomic_int32_t = Atomic; +using atomic_uint32_t = Atomic; +using atomic_int64_t = Atomic; +using atomic_uint64_t = Atomic; +using atomic_int_least8_t = Atomic; +using atomic_uint_least8_t = Atomic; +using atomic_int_least16_t = Atomic; +using atomic_uint_least16_t = Atomic; +using atomic_int_least32_t = Atomic; +using atomic_uint_least32_t = Atomic; +using atomic_int_least64_t = Atomic; +using atomic_uint_least64_t = Atomic; +using atomic_int_fast8_t = Atomic; +using atomic_uint_fast8_t = Atomic; +using atomic_int_fast16_t = Atomic; +using atomic_uint_fast16_t = Atomic; +using atomic_int_fast32_t = Atomic; +using atomic_uint_fast32_t = Atomic; +using atomic_int_fast64_t = Atomic; +using atomic_uint_fast64_t = Atomic; +using atomic_intptr_t = Atomic; +using atomic_uintptr_t = Atomic; +using atomic_size_t = Atomic; +using atomic_ptrdiff_t = Atomic; +using atomic_intmax_t = Atomic; +using atomic_uintmax_t = Atomic; + +template +void atomic_init(Atomic *obj, typename Atomic::value_type desired) noexcept +{ + obj->init(desired); +} + +template +void atomic_init(volatile Atomic *obj, typename Atomic::value_type desired) noexcept +{ + obj->init(desired); +} + +template +bool atomic_is_lock_free(const Atomic *obj) noexcept +{ + return obj->is_lock_free(); +} + +template +bool atomic_is_lock_free(const volatile Atomic *obj) noexcept +{ + return obj->is_lock_free(); +} + +template +void atomic_store(Atomic *obj, typename Atomic::value_type desired) noexcept +{ + obj->store(desired); +} + +template +void atomic_store(volatile Atomic *obj, typename Atomic::value_type desired) noexcept +{ + obj->store(desired); +} + +template +void atomic_store_explicit(Atomic *obj, typename Atomic::value_type desired, memory_order order) noexcept +{ + obj->store(desired, order); +} + +template +void atomic_store_explicit(volatile Atomic *obj, typename Atomic::value_type desired, memory_order order) noexcept +{ + obj->store(desired, order); +} + +template +T atomic_load(const Atomic *obj) noexcept +{ + return obj->load(); +} + +template +T atomic_load(const volatile Atomic *obj) noexcept +{ + return obj->load(); +} + +template +T atomic_load_explicit(const Atomic *obj, memory_order order) noexcept 
+{ + return obj->load(order); +} + +template +T atomic_load_explicit(const volatile Atomic *obj, memory_order order) noexcept +{ + return obj->load(order); +} + +template +T atomic_exchange(Atomic *obj, typename Atomic::value_type desired) noexcept +{ + return obj->exchange(desired); +} + +template +T atomic_exchange(volatile Atomic *obj, typename Atomic::value_type desired) noexcept +{ + return obj->exchange(desired); +} + +template +T atomic_exchange_explicit(Atomic *obj, typename Atomic::value_type desired, memory_order order) noexcept +{ + return obj->exchange(desired, order); +} + +template +T atomic_exchange_explicit(volatile Atomic *obj, typename Atomic::value_type desired, memory_order order) noexcept +{ + return obj->exchange(desired, order); +} + +template +bool atomic_compare_exchange_weak(Atomic *obj, + typename Atomic::value_type *currentExpected, + typename Atomic::value_type desired) noexcept +{ + return obj->compare_exchange_weak(obj, *currentExpected, desired); +} + +template +bool atomic_compare_exchange_weak(volatile Atomic *obj, + typename Atomic::value_type *currentExpected, + typename Atomic::value_type desired) noexcept +{ + return obj->compare_exchange_weak(obj, *currentExpected, desired); +} + +template +bool atomic_compare_exchange_strong(Atomic *obj, + typename Atomic::value_type *currentExpected, + typename Atomic::value_type desired) noexcept +{ + return obj->compare_exchange_strong(obj, *currentExpected, desired); +} + +template +bool atomic_compare_exchange_strong(volatile Atomic *obj, + typename Atomic::value_type *currentExpected, + typename Atomic::value_type desired) noexcept +{ + return obj->compare_exchange_strong(obj, *currentExpected, desired); +} + +template +bool atomic_compare_exchange_weak_explicit(Atomic *obj, + typename Atomic::value_type *currentExpected, + typename Atomic::value_type desired, + memory_order success, + memory_order failure) noexcept +{ + return obj->compare_exchange_weak(obj, *currentExpected, desired, success, failure); +} + +template +bool atomic_compare_exchange_weak_explicit(volatile Atomic *obj, + typename Atomic::value_type *currentExpected, + typename Atomic::value_type desired, + memory_order success, + memory_order failure) noexcept +{ + return obj->compare_exchange_weak(obj, *currentExpected, desired, success, failure); +} + +template +bool atomic_compare_exchange_strong_explicit(Atomic *obj, + typename Atomic::value_type *currentExpected, + typename Atomic::value_type desired, + memory_order success, + memory_order failure) noexcept +{ + return obj->compare_exchange_strong(obj, *currentExpected, desired, success, failure); +} + +template +bool atomic_compare_exchange_strong_explicit(volatile Atomic *obj, + typename Atomic::value_type *currentExpected, + typename Atomic::value_type desired, + memory_order success, + memory_order failure) noexcept +{ + return obj->compare_exchange_strong(obj, *currentExpected, desired, success, failure); +} + +template +T atomic_fetch_add(Atomic *obj, typename Atomic::difference_type arg) noexcept +{ + return obj->fetch_add(arg); +} + +template +T atomic_fetch_add(volatile Atomic *obj, typename Atomic::difference_type arg) noexcept +{ + return obj->fetch_add(arg); +} + +template +T atomic_fetch_add_explicit(Atomic *obj, typename Atomic::difference_type arg, memory_order order) noexcept +{ + return obj->fetch_add(arg, order); +} + +template +T atomic_fetch_add_explicit(volatile Atomic *obj, typename Atomic::difference_type arg, memory_order order) noexcept +{ + return 
obj->fetch_add(arg, order); +} + +template +T atomic_fetch_sub(Atomic *obj, typename Atomic::difference_type arg) noexcept +{ + return obj->fetch_sub(arg); +} + +template +T atomic_fetch_sub(volatile Atomic *obj, typename Atomic::difference_type arg) noexcept +{ + return obj->fetch_sub(arg); +} + +template +T atomic_fetch_sub_explicit(Atomic *obj, typename Atomic::difference_type arg, memory_order order) noexcept +{ + return obj->fetch_sub(arg, order); +} + +template +T atomic_fetch_sub_explicit(volatile Atomic *obj, typename Atomic::difference_type arg, memory_order order) noexcept +{ + return obj->fetch_sub(arg, order); +} + +template +T atomic_fetch_and(Atomic *obj, typename Atomic::value_type arg) noexcept +{ + return obj->fetch_and(arg); +} + +template +T atomic_fetch_and(volatile Atomic *obj, typename Atomic::value_type arg) noexcept +{ + return obj->fetch_and(arg); +} + +template +T atomic_fetch_and_explicit(Atomic *obj, typename Atomic::value_type arg, memory_order order) noexcept +{ + return obj->fetch_and(arg, order); +} + +template +T atomic_fetch_and_explicit(volatile Atomic *obj, typename Atomic::value_type arg, memory_order order) noexcept +{ + return obj->fetch_and(arg, order); +} + +template +T atomic_fetch_or(Atomic *obj, typename Atomic::value_type arg) noexcept +{ + return obj->fetch_or(arg); +} + +template +T atomic_fetch_or(volatile Atomic *obj, typename Atomic::value_type arg) noexcept +{ + return obj->fetch_or(arg); +} + +template +T atomic_fetch_or_explicit(Atomic *obj, typename Atomic::value_type arg, memory_order order) noexcept +{ + return obj->fetch_or(arg, order); +} + +template +T atomic_fetch_or_explicit(volatile Atomic *obj, typename Atomic::value_type arg, memory_order order) noexcept +{ + return obj->fetch_or(arg, order); +} + +template +T atomic_fetch_xor(Atomic *obj, typename Atomic::value_type arg) noexcept +{ + return obj->fetch_xor(arg); +} + +template +T atomic_fetch_xor(volatile Atomic *obj, typename Atomic::value_type arg) noexcept +{ + return obj->fetch_xor(arg); +} + +template +T atomic_fetch_xor_explicit(Atomic *obj, typename Atomic::value_type arg, memory_order order) noexcept +{ + return obj->fetch_xor(arg, order); +} + +template +T atomic_fetch_xor_explicit(volatile Atomic *obj, typename Atomic::value_type arg, memory_order order) noexcept +{ + return obj->fetch_xor(arg, order); +} + +struct atomic_flag { + atomic_flag() noexcept = default; + atomic_flag(const atomic_flag &) = delete; + atomic_flag &operator=(const atomic_flag &) = delete; + atomic_flag &operator=(const atomic_flag &) volatile = delete; + bool test_and_set() volatile noexcept + { + return core_util_atomic_flag_test_and_set(&_flag); + } + bool test_and_set(memory_order order) volatile noexcept + { + return core_util_atomic_flag_test_and_set_explicit(&_flag, order); + } + void clear() volatile noexcept + { + core_util_atomic_flag_clear(&_flag); + } + void clear() noexcept + { + core_util_atomic_flag_clear(&_flag); + } + void clear(memory_order order) volatile noexcept + { + core_util_atomic_flag_clear_explicit(&_flag, order); + } + void clear(memory_order order) noexcept + { + core_util_atomic_flag_clear_explicit(&_flag, order); + } +private: + core_util_atomic_flag _flag; +}; + +MBED_FORCEINLINE bool atomic_flag_test_and_set(volatile atomic_flag *flag) noexcept +{ + return flag->test_and_set(); +} + +MBED_FORCEINLINE bool atomic_flag_test_and_set(atomic_flag *flag) noexcept +{ + return flag->test_and_set(); +} + +MBED_FORCEINLINE bool atomic_flag_test_and_set_explicit(volatile 
atomic_flag *flag, memory_order order) noexcept +{ + return flag->test_and_set(order); +} + +MBED_FORCEINLINE bool atomic_flag_test_and_set_explicit(atomic_flag *flag, memory_order order) noexcept +{ + return flag->test_and_set(order); +} + +MBED_FORCEINLINE void atomic_flag_clear(volatile atomic_flag *flag) noexcept +{ + flag->clear(); +} + +MBED_FORCEINLINE void atomic_flag_clear(atomic_flag *flag) noexcept +{ + flag->clear(); +} + +MBED_FORCEINLINE void atomic_flag_clear_explicit(volatile atomic_flag *flag, memory_order order) noexcept +{ + flag->clear(order); +} + +MBED_FORCEINLINE void atomic_flag_clear_explicit(atomic_flag *flag, memory_order order) noexcept +{ + flag->clear(order); +} + +#define MBED_ATOMIC_FLAG_INIT { CORE_UTIL_ATOMIC_FLAG_INIT } + +template +T kill_dependency(T y) noexcept +{ + return y; +} + +MBED_FORCEINLINE void atomic_signal_fence(memory_order order) noexcept +{ + if (order != memory_order_relaxed) { + MBED_COMPILER_BARRIER(); + } +} + +MBED_FORCEINLINE void atomic_thread_fence(memory_order order) noexcept +{ + if (order != memory_order_relaxed) { + MBED_BARRIER(); + } +} +/**@}*/ + +/**@}*/ + +} // namespace mbed + +#endif diff --git a/platform/internal/mbed_atomic_impl.h b/platform/internal/mbed_atomic_impl.h index f169663..05c3f13 100644 --- a/platform/internal/mbed_atomic_impl.h +++ b/platform/internal/mbed_atomic_impl.h @@ -132,14 +132,49 @@ #endif #ifdef __CC_ARM -#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ +#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \ + __asm { \ + LDREX##M newValue, [valuePtr] ; \ + OP newValue, arg ; \ + STREX##M fail, newValue, [valuePtr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \ + __asm volatile ( \ + "LDREX"#M "\t%[newValue], %[value]\n\t" \ + #OP "\t%[newValue], %[arg]\n\t" \ + "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ + : [newValue] "=&" MBED_DOP_REG (newValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*valuePtr) \ + : [arg] Constants MBED_DOP_REG (arg) \ + : "cc" \ + ) +#elif defined __ICCARM__ +/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */ +/* IAR does not support "ADDS reg, reg", so write as 3-operand */ +#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \ + asm volatile ( \ + "LDREX"#M "\t%[newValue], [%[valuePtr]]\n" \ + #OP "\t%[newValue], %[newValue], %[arg]\n" \ + "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ + : [newValue] "=&r" (newValue), \ + [fail] "=&r" (fail) \ + : [valuePtr] "r" (valuePtr), \ + [arg] "r" (arg) \ + : "memory", "cc" \ + ) +#endif + +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \ __asm { \ LDREX##M oldValue, [valuePtr] ; \ OP newValue, oldValue, arg ; \ STREX##M fail, newValue, [valuePtr] \ } #elif defined __clang__ || defined __GNUC__ -#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ +#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \ __asm volatile ( \ ".syntax unified\n\t" \ "LDREX"#M "\t%[oldValue], %[value]\n\t" \ @@ -154,7 +189,7 @@ ) #elif defined __ICCARM__ /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */ -#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ +#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \ asm volatile ( \ "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ #OP "\t%[newValue], %[oldValue], %[arg]\n" \ @@ -172,7 +207,7 @@ * are only 2-operand versions of the instructions. 
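 *
 * Illustrative expansion (sketch) of the 2-operand form for an 8-bit fetch_and:
 *
 *   LDREXB  oldValue, [valuePtr]
 *   MOV     newValue, oldValue
 *   ANDS    newValue, arg
 *   STREXB  fail, newValue, [valuePtr]
 *
 * retried by the enclosing loop while STREXB reports failure.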
*/ #ifdef __CC_ARM -#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ +#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \ __asm { \ LDREX##M oldValue, [valuePtr] ; \ MOV newValue, oldValue ; \ @@ -180,7 +215,7 @@ STREX##M fail, newValue, [valuePtr] \ } #elif defined __clang__ || defined __GNUC__ -#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ +#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \ __asm volatile ( \ ".syntax unified\n\t" \ "LDREX"#M "\t%[oldValue], %[value]\n\t" \ @@ -195,7 +230,7 @@ : "cc" \ ) #elif defined __ICCARM__ -#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ +#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \ asm volatile ( \ "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ "MOV" "\t%[newValue], %[oldValue]\n" \ @@ -444,17 +479,41 @@ } -#define DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, T, fn_suffix, M) \ +#define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M) \ +inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \ +{ \ + uint32_t fail, newValue; \ + MBED_BARRIER(); \ + do { \ + DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_BARRIER(); \ + return (T) newValue; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *valuePtr, T arg, mbed_memory_order order) \ +{ \ + uint32_t fail, newValue; \ + MBED_RELEASE_BARRIER(order); \ + do { \ + DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_ACQUIRE_BARRIER(order); \ + return (T) newValue; \ +} \ + +#define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M) \ inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \ { \ T oldValue; \ uint32_t fail, newValue; \ MBED_BARRIER(); \ do { \ - DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \ + DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \ } while (fail); \ MBED_BARRIER(); \ - return (T) retValue; \ + return oldValue; \ } \ \ MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ @@ -464,22 +523,22 @@ uint32_t fail, newValue; \ MBED_RELEASE_BARRIER(order); \ do { \ - DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \ + DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \ } while (fail); \ MBED_ACQUIRE_BARRIER(order); \ - return (T) retValue; \ + return oldValue; \ } \ -#define DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, T, fn_suffix, M) \ +#define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M) \ inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \ T oldValue; \ uint32_t fail, newValue; \ MBED_BARRIER(); \ do { \ - DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \ + DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \ } while (fail); \ MBED_BARRIER(); \ - return (T) retValue; \ + return oldValue; \ } \ \ MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ @@ -489,10 +548,10 @@ uint32_t fail, newValue; \ MBED_RELEASE_BARRIER(order); \ do { \ - DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \ + DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \ } while (fail); \ MBED_ACQUIRE_BARRIER(order); \ - return (T) retValue; \ + return oldValue; \ } \ inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr) @@ -526,15 +585,20 @@ DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \ DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, ) -#define DO_MBED_LOCKFREE_3OPS(name, OP, Constants, retValue) \ - DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint8_t, u8, B) \ - 
DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint16_t, u16, H) \ - DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint32_t, u32, ) +#define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \ + DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t, u8, B) \ + DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \ + DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32, ) -#define DO_MBED_LOCKFREE_2OPS(name, OP, Constants, retValue) \ - DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint8_t, u8, B) \ - DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint16_t, u16, H) \ - DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint32_t, u32, ) +#define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \ + DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t, u8, B) \ + DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \ + DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \ + DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t, u8, B) \ + DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \ + DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32, ) #define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \ DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \ @@ -546,6 +610,11 @@ DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \ DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, ) +// Note that these macros define a number of functions that are +// not in mbed_atomic.h, like core_util_atomic_and_fetch_u16. +// These are not documented via the doxygen in mbed_atomic.h, so +// for now should be regarded as internal only. They are used by the +// Atomic template as an optimisation though. // We always use the "S" form of operations - avoids yet another // possible unneeded distinction between Thumbv1 and Thumbv2, and @@ -559,33 +628,42 @@ // of the 16-bit forms. Shame we can't specify "don't care" // for the "S", or get the GNU multi-alternative to // choose ADDS/ADD appropriately. -DO_MBED_LOCKFREE_3OPS(incr, ADDS, "IL", newValue) -DO_MBED_LOCKFREE_3OPS(decr, SUBS, "IL", newValue) -DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "IL", oldValue) -DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "IL", oldValue) +DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "IL") +DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IL") +DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL") +DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IL") // K constraint is inverted 12-bit modified immediate constant // (relying on assembler substituting BIC for AND) -DO_MBED_LOCKFREE_3OPS(fetch_and, ANDS, "IK", oldValue) +DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK") +DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK") #if MBED_EXCLUSIVE_ACCESS_ARM // ARM does not have ORN instruction, so take plain immediates. -DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "I", oldValue) +DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "I") +DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "I") #else // Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR. 
-DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "IK", oldValue) +DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "IK") +DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "IK") #endif // I constraint is 12-bit modified immediate operand -DO_MBED_LOCKFREE_3OPS(fetch_xor, EORS, "I", oldValue) +DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS, "I") +DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I") #else // MBED_EXCLUSIVE_ACCESS_THUMB1 +// I constraint is 0-255; J is -255 to -1, suitable for +// 2-op ADD/SUB (relying on assembler to swap ADD/SUB) // L constraint is -7 to +7, suitable for 3-op ADD/SUB // (relying on assembler to swap ADD/SUB) -DO_MBED_LOCKFREE_3OPS(incr, ADDS, "L", newValue) -DO_MBED_LOCKFREE_3OPS(decr, SUBS, "L", newValue) -DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "L", oldValue) -DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "L", oldValue) -DO_MBED_LOCKFREE_2OPS(fetch_and, ANDS, "", oldValue) -DO_MBED_LOCKFREE_2OPS(fetch_or, ORRS, "", oldValue) -DO_MBED_LOCKFREE_2OPS(fetch_xor, EORS, "", oldValue) +DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "L") +DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IJ") +DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L") +DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IJ") +DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "") +DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "") +DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or, ORRS, "") +DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "") +DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "") +DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "") #endif DO_MBED_LOCKFREE_EXCHG_OPS() @@ -1011,49 +1089,49 @@ */ #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \ template<> \ -inline T core_util_atomic_load(const volatile T *valuePtr) \ +inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \ { \ return core_util_atomic_load_##fn_suffix(valuePtr); \ } \ \ template<> \ -inline T core_util_atomic_load(const T *valuePtr) \ +inline T core_util_atomic_load(const T *valuePtr) noexcept \ { \ return core_util_atomic_load_##fn_suffix(valuePtr); \ } \ \ template<> \ -inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \ +inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \ { \ return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \ } \ \ template<> \ -inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \ +inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \ { \ return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \ } template -inline T *core_util_atomic_load(T *const volatile *valuePtr) +inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept { return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr); } template -inline T *core_util_atomic_load(T *const *valuePtr) +inline T *core_util_atomic_load(T *const *valuePtr) noexcept { return (T *) core_util_atomic_load_ptr((void *const *) valuePtr); } template -inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) +inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept { return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order); } template -inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) +inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept { return (T *) core_util_atomic_load_explicit_ptr((void 
*const *) valuePtr, order); } @@ -1070,49 +1148,49 @@ #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \ template<> \ -inline void core_util_atomic_store(volatile T *valuePtr, T val) \ +inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \ { \ core_util_atomic_store_##fn_suffix(valuePtr, val); \ } \ \ template<> \ -inline void core_util_atomic_store(T *valuePtr, T val) \ +inline void core_util_atomic_store(T *valuePtr, T val) noexcept \ { \ core_util_atomic_store_##fn_suffix(valuePtr, val); \ } \ \ template<> \ -inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \ +inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \ { \ core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \ } \ \ template<> \ -inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \ +inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \ { \ core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \ } template<typename T> -inline void core_util_atomic_store(T *volatile *valuePtr, T *val) +inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept { core_util_atomic_store_ptr((void *volatile *) valuePtr, val); } template<typename T> -inline void core_util_atomic_store(T **valuePtr, T *val) +inline void core_util_atomic_store(T **valuePtr, T *val) noexcept { core_util_atomic_store_ptr((void **) valuePtr, val); } template<typename T> -inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) +inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept { core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order); } template<typename T> -inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) +inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept { core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order); } @@ -1129,19 +1207,19 @@ #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \ template<> inline \ -bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \ { \ return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ } template<typename T> -inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) +inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept { return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue); } template<typename T> -inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) +inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept { return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue); } @@ -1162,63 +1240,63 @@ #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \ template<> \ -inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \ +inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \ { \ return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \ } \ \ template<> \ inline T 
core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \ - mbed_memory_order order) \ + mbed_memory_order order) noexcept \ { \ return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \ } template<> -inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) +inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept { return core_util_atomic_exchange_bool(valuePtr, arg); } template<> -inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) +inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept { return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order); } template<typename T> -inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) +inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept { return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg); } template<typename T> -inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) +inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept { return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order); } template<typename T> -inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) +inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept { return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T)); } template<typename T> -inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) +inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept { return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order); } template<typename T> -inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) +inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept { return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T)); } template<typename T> -inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) +inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept { return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order); } @@ -1236,6 +1314,20 @@ DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \ DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64) +#define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \ +template<> \ +inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \ +{ \ + return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \ +} \ + \ +template<> \ +inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \ + mbed_memory_order order) noexcept \ +{ \ + return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \ +} + DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange) DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange) DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add) @@ -1246,25 +1338,61 @@ DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or) DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor) +namespace mbed { +namespace impl { + +// Use custom assembler forms for pre-ops where available, else construct from post-ops +#if MBED_EXCLUSIVE_ACCESS +#define 
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \ + template T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \ + template T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \ + DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP) +#else +#define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \ + template T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \ + template T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \ + DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \ + DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \ + DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \ + DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP) +#endif + +// *INDENT-OFF* +DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg) +DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr, fetch_sub, - arg) +DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg) +DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch, fetch_or, | arg) +DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg) +// *INDENT-ON* + +} +} + #endif // __cplusplus #undef MBED_DOP_REG #undef MBED_CMP_IMM #undef MBED_SUB3_IMM #undef DO_MBED_LOCKFREE_EXCHG_ASM -#undef DO_MBED_LOCKFREE_3OP_ASM -#undef DO_MBED_LOCKFREE_2OP_ASM +#undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM +#undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM +#undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM #undef DO_MBED_LOCKFREE_CAS_WEAK_ASM #undef DO_MBED_LOCKFREE_CAS_STRONG_ASM #undef DO_MBED_LOCKFREE_LOADSTORE #undef DO_MBED_LOCKFREE_EXCHG_OP #undef DO_MBED_LOCKFREE_CAS_WEAK_OP #undef DO_MBED_LOCKFREE_CAS_STRONG_OP -#undef DO_MBED_LOCKFREE_2OP -#undef DO_MBED_LOCKFREE_3OP +#undef DO_MBED_LOCKFREE_NEWVAL_2OP +#undef DO_MBED_LOCKFREE_OLDVAL_2OP +#undef DO_MBED_LOCKFREE_OLDVAL_3OP #undef DO_MBED_LOCKFREE_EXCHG_OPS -#undef DO_MBED_LOCKFREE_2OPS -#undef DO_MBED_LOCKFREE_3OPS +#undef DO_MBED_LOCKFREE_NEWVAL_2OPS +#undef DO_MBED_LOCKFREE_OLDVAL_2OPS +#undef DO_MBED_LOCKFREE_OLDVAL_3OPS #undef DO_MBED_LOCKFREE_CAS_WEAK_OPS #undef DO_MBED_LOCKFREE_CAS_STRONG_OPS #undef DO_MBED_SIGNED_CAS_OP diff --git a/platform/mbed_atomic.h b/platform/mbed_atomic.h index ebdba7e..780a485 100644 --- a/platform/mbed_atomic.h +++ b/platform/mbed_atomic.h @@ -880,6 +880,8 @@ #ifdef __cplusplus } // extern "C" +#include "mbed_cxxsupport.h" + // For each operation, two overloaded templates: // * one for non-pointer types, which has implementations based on the // u8/u16/u32/u64/s8/s16/s32/s64/bool functions above. No base implementation. @@ -887,100 +889,99 @@ // // Templates use standard C/C++ naming - old incr/decr/cas forms are not provided. // -// Note that C++ template selection somewhat inhibits the ease of use of these templates. -// Ambiguities arise with setting pointers to NULL, or adding constants to integers. -// It may be necessary to cast the argument or desired value to the correct type, or -// explictly specify the type - eg core_util_atomic_store(&fh, NULL) or -// core_util_atomic_store(&val, (uint8_t)1). -// A proper mbed::Atomic class would solve the issue. +// The `type_identity_t` used here means "same type as T", blocking template +// argument deduction. 
It forces type selection based on the type of the actual pointer +// to the atomic. If just `T` was used, the following would be ambiguous: +// core_util_atomic_store(&my_uint8_t, 1) - it wouldn't be able to select between T +// being uint8_t and int. /** \copydoc core_util_atomic_load_u8 */ -template T core_util_atomic_load(const volatile T *valuePtr); +template T core_util_atomic_load(const volatile T *valuePtr) noexcept; /** \copydoc core_util_atomic_load_u8 */ -template T core_util_atomic_load(const T *valuePtr); +template T core_util_atomic_load(const T *valuePtr) noexcept; /** \copydoc core_util_atomic_store_u8 */ -template void core_util_atomic_store(volatile T *valuePtr, T desiredValue); +template void core_util_atomic_store(volatile T *valuePtr, mbed::type_identity_t desiredValue) noexcept; /** \copydoc core_util_atomic_store_u8 */ -template void core_util_atomic_store(T *valuePtr, T desiredValue); +template void core_util_atomic_store(T *valuePtr, mbed::type_identity_t desiredValue) noexcept; /** \copydoc core_util_atomic_exchange_u8 */ -template T core_util_atomic_exchange(volatile T *ptr, T desiredValue); +template T core_util_atomic_exchange(volatile T *ptr, mbed::type_identity_t desiredValue) noexcept; /** \copydoc core_util_atomic_cas_u8 */ -template bool core_util_atomic_compare_exchange_strong(volatile T *ptr, T *expectedCurrentValue, T desiredValue); +template bool core_util_atomic_compare_exchange_strong(volatile T *ptr, mbed::type_identity_t *expectedCurrentValue, mbed::type_identity_t desiredValue) noexcept; /** \copydoc core_util_atomic_compare_exchange_weak_u8 */ -template bool core_util_atomic_compare_exchange_weak(volatile T *ptr, T *expectedCurrentValue, T desiredValue); +template bool core_util_atomic_compare_exchange_weak(volatile T *ptr, mbed::type_identity_t *expectedCurrentValue, mbed::type_identity_t desiredValue) noexcept; /** \copydoc core_util_fetch_add_u8 */ -template T core_util_atomic_fetch_add(volatile T *valuePtr, T arg); +template T core_util_atomic_fetch_add(volatile T *valuePtr, mbed::type_identity_t arg) noexcept; /** \copydoc core_util_fetch_sub_u8 */ -template T core_util_atomic_fetch_sub(volatile T *valuePtr, T arg); +template T core_util_atomic_fetch_sub(volatile T *valuePtr, mbed::type_identity_t arg) noexcept; /** \copydoc core_util_fetch_and_u8 */ -template T core_util_atomic_fetch_and(volatile T *valuePtr, T arg); +template T core_util_atomic_fetch_and(volatile T *valuePtr, mbed::type_identity_t arg) noexcept; /** \copydoc core_util_fetch_or_u8 */ -template T core_util_atomic_fetch_or(volatile T *valuePtr, T arg); +template T core_util_atomic_fetch_or(volatile T *valuePtr, mbed::type_identity_t arg) noexcept; /** \copydoc core_util_fetch_xor_u8 */ -template T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg); +template T core_util_atomic_fetch_xor(volatile T *valuePtr, mbed::type_identity_t arg) noexcept; /** \copydoc core_util_atomic_load_explicit_u8 */ -template T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order); +template T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_load_explicit_u8 */ -template T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order); +template T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_store_explicit_u8 */ -template void core_util_atomic_store_explicit(volatile T *valuePtr, T desiredValue, 
mbed_memory_order order); +template void core_util_atomic_store_explicit(volatile T *valuePtr, mbed::type_identity_t desiredValue, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_store_explicit_u8 */ -template void core_util_atomic_store_explicit(T *valuePtr, T desiredValue, mbed_memory_order order); +template void core_util_atomic_store_explicit(T *valuePtr, mbed::type_identity_t desiredValue, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_exchange_explicit_u8 */ -template T core_util_atomic_exchange_explicit(volatile T *ptr, T desiredValue, mbed_memory_order order); +template T core_util_atomic_exchange_explicit(volatile T *ptr, mbed::type_identity_t desiredValue, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_cas_explicit_u8 */ -template bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure); +template bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, mbed::type_identity_t *expectedCurrentValue, mbed::type_identity_t desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept; /** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ -template bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure); +template bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, mbed::type_identity_t *expectedCurrentValue, mbed::type_identity_t desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept; /** \copydoc core_util_fetch_add_explicit_u8 */ -template T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +template T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, mbed::type_identity_t arg, mbed_memory_order order) noexcept; /** \copydoc core_util_fetch_sub_explicit_u8 */ -template T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +template T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, mbed::type_identity_t arg, mbed_memory_order order) noexcept; /** \copydoc core_util_fetch_and_explicit_u8 */ -template T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +template T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, mbed::type_identity_t arg, mbed_memory_order order) noexcept; /** \copydoc core_util_fetch_or_explicit_u8 */ -template T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +template T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, mbed::type_identity_t arg, mbed_memory_order order) noexcept; /** \copydoc core_util_fetch_xor_explicit_u8 */ -template T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +template T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, mbed::type_identity_t arg, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_load_ptr */ -template inline T *core_util_atomic_load(T *const volatile *valuePtr); +template inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept; /** \copydoc core_util_atomic_load_ptr */ -template inline T *core_util_atomic_load(T *const *valuePtr); +template inline T *core_util_atomic_load(T *const *valuePtr) noexcept; /** \copydoc core_util_atomic_store_ptr */ -template inline void core_util_atomic_store(T *volatile 
*valuePtr, T *desiredValue); +template inline void core_util_atomic_store(T *volatile *valuePtr, mbed::type_identity_t *desiredValue) noexcept; /** \copydoc core_util_atomic_store_ptr */ -template inline void core_util_atomic_store(T **valuePtr, T *desiredValue); +template inline void core_util_atomic_store(T **valuePtr, mbed::type_identity_t *desiredValue) noexcept; /** \copydoc core_util_atomic_exchange_ptr */ -template inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *desiredValue); +template inline T *core_util_atomic_exchange(T *volatile *valuePtr, mbed::type_identity_t *desiredValue) noexcept; /** \copydoc core_util_atomic_cas_ptr */ -template inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue); +template inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, mbed::type_identity_t **expectedCurrentValue, mbed::type_identity_t *desiredValue) noexcept; /** \copydoc core_util_atomic_compare_exchange_weak_ptr */ -template inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue); +template inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, mbed::type_identity_t **expectedCurrentValue, mbed::type_identity_t *desiredValue) noexcept; /** \copydoc core_util_fetch_add_ptr */ -template inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg); +template inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept; /** \copydoc core_util_fetch_sub_ptr */ -template inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg); +template inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept; /** \copydoc core_util_atomic_load_explicit_ptr */ -template inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order); +template inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_load_explicit_ptr */ -template inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order); +template inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_store_explicit_ptr */ -template inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order); +template inline void core_util_atomic_store_explicit(T *volatile *valuePtr, mbed::type_identity_t *desiredValue, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_store_explicit_ptr */ -template inline void core_util_atomic_store_explicit(T **valuePtr, T *desiredValue, mbed_memory_order order); +template inline void core_util_atomic_store_explicit(T **valuePtr, mbed::type_identity_t *desiredValue, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_exchange_explicit_ptr */ -template inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order); +template inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, mbed::type_identity_t *desiredValue, mbed_memory_order order) noexcept; /** \copydoc core_util_atomic_cas_explicit_ptr */ -template inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure); +template inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile 
*ptr, mbed::type_identity_t **expectedCurrentValue, mbed::type_identity_t *desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept; /** \copydoc core_util_atomic_compare_exchange_weak_explicit_ptr */ -template inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure); +template inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, mbed::type_identity_t **expectedCurrentValue, mbed::type_identity_t *desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept; /** \copydoc core_util_fetch_add_explicit_ptr */ -template inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); +template inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept; /** \copydoc core_util_fetch_sub_explicit_ptr */ -template inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); +template inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept; #endif // __cplusplus
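
The snippet below is a minimal usage sketch of the templated overloads declared above, not part of the patch itself. It assumes only what the declarations show: T is deduced from the pointer argument, the mbed::type_identity_t<T> value parameters block deduction from the value arguments, and the pointer forms take a ptrdiff_t element count scaled by sizeof(T). The variable and function names (counter, flags, buffer, head, atomic_template_usage_sketch) are illustrative only.

#include "platform/mbed_atomic.h"

static uint8_t counter;
static uint32_t flags;
static int buffer[4];
static int *volatile head = buffer;

void atomic_template_usage_sketch()
{
    // T comes from the pointer only, so plain integer literals need no casts.
    core_util_atomic_store(&counter, 1);
    core_util_atomic_fetch_or(&flags, 0x10u);

    // C++11-style CAS loop; 'expected' is updated with the observed value on failure.
    uint8_t expected = core_util_atomic_load(&counter);
    while (!core_util_atomic_compare_exchange_weak(&counter, &expected, expected + 1)) {
    }

    // Pointer overloads advance by whole elements: this moves head on by one int
    // and returns the previous pointer.
    int *prev = core_util_atomic_fetch_add(&head, 1);
    (void) prev;
}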