diff --git a/UNITTESTS/features/lorawan/lorawanstack/unittest.cmake b/UNITTESTS/features/lorawan/lorawanstack/unittest.cmake index 044bc7b..3b6c2b1 100644 --- a/UNITTESTS/features/lorawan/lorawanstack/unittest.cmake +++ b/UNITTESTS/features/lorawan/lorawanstack/unittest.cmake @@ -35,7 +35,7 @@ stubs/LoRaPHY_stub.cpp stubs/LoRaMac_stub.cpp stubs/mbed_assert_stub.c - stubs/mbed_critical_stub.c + stubs/mbed_atomic_stub.c stubs/LoRaMacCrypto_stub.cpp stubs/LoRaMacChannelPlan_stub.cpp stubs/LoRaWANTimer_stub.cpp diff --git a/UNITTESTS/features/netsocket/DTLSSocket/unittest.cmake b/UNITTESTS/features/netsocket/DTLSSocket/unittest.cmake index d049b86..8b66364 100644 --- a/UNITTESTS/features/netsocket/DTLSSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/DTLSSocket/unittest.cmake @@ -22,6 +22,7 @@ features/netsocket/DTLSSocket/test_DTLSSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c ../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c diff --git a/UNITTESTS/features/netsocket/DTLSSocketWrapper/unittest.cmake b/UNITTESTS/features/netsocket/DTLSSocketWrapper/unittest.cmake index e0bea91..5fc1e82 100644 --- a/UNITTESTS/features/netsocket/DTLSSocketWrapper/unittest.cmake +++ b/UNITTESTS/features/netsocket/DTLSSocketWrapper/unittest.cmake @@ -21,6 +21,7 @@ features/netsocket/DTLSSocketWrapper/test_DTLSSocketWrapper.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c ../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c diff --git a/UNITTESTS/features/netsocket/InternetSocket/unittest.cmake b/UNITTESTS/features/netsocket/InternetSocket/unittest.cmake index 7811f3a..39575a6 100644 --- a/UNITTESTS/features/netsocket/InternetSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/InternetSocket/unittest.cmake @@ -18,6 +18,7 @@ features/netsocket/InternetSocket/test_InternetSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c stubs/EventQueue_stub.cpp diff --git a/UNITTESTS/features/netsocket/TCPServer/unittest.cmake b/UNITTESTS/features/netsocket/TCPServer/unittest.cmake index 7e81e49..e8c4572 100644 --- a/UNITTESTS/features/netsocket/TCPServer/unittest.cmake +++ b/UNITTESTS/features/netsocket/TCPServer/unittest.cmake @@ -22,6 +22,7 @@ set(unittest-test-sources stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c stubs/EventQueue_stub.cpp diff --git a/UNITTESTS/features/netsocket/TCPSocket/unittest.cmake b/UNITTESTS/features/netsocket/TCPSocket/unittest.cmake index 21c5e05..f21779b 100644 --- a/UNITTESTS/features/netsocket/TCPSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/TCPSocket/unittest.cmake @@ -19,6 +19,7 @@ features/netsocket/TCPSocket/test_TCPSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c stubs/EventQueue_stub.cpp diff --git a/UNITTESTS/features/netsocket/TLSSocket/unittest.cmake b/UNITTESTS/features/netsocket/TLSSocket/unittest.cmake index 43b0319..9e0b906 100644 --- a/UNITTESTS/features/netsocket/TLSSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/TLSSocket/unittest.cmake @@ -21,6 +21,7 @@ features/netsocket/TLSSocket/test_TLSSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c 
stubs/equeue_stub.c ../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c diff --git a/UNITTESTS/features/netsocket/TLSSocketWrapper/unittest.cmake b/UNITTESTS/features/netsocket/TLSSocketWrapper/unittest.cmake index 2a86d3f..2ff13c2 100644 --- a/UNITTESTS/features/netsocket/TLSSocketWrapper/unittest.cmake +++ b/UNITTESTS/features/netsocket/TLSSocketWrapper/unittest.cmake @@ -20,6 +20,7 @@ features/netsocket/TLSSocketWrapper/test_TLSSocketWrapper.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c ../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c diff --git a/UNITTESTS/features/netsocket/UDPSocket/unittest.cmake b/UNITTESTS/features/netsocket/UDPSocket/unittest.cmake index 3646338..99a5900 100644 --- a/UNITTESTS/features/netsocket/UDPSocket/unittest.cmake +++ b/UNITTESTS/features/netsocket/UDPSocket/unittest.cmake @@ -19,6 +19,7 @@ features/netsocket/UDPSocket/test_UDPSocket.cpp stubs/Mutex_stub.cpp stubs/mbed_assert_stub.c + stubs/mbed_atomic_stub.c stubs/mbed_critical_stub.c stubs/equeue_stub.c stubs/EventQueue_stub.cpp diff --git a/UNITTESTS/stubs/mbed_atomic_stub.c b/UNITTESTS/stubs/mbed_atomic_stub.c new file mode 100644 index 0000000..819a6ed --- /dev/null +++ b/UNITTESTS/stubs/mbed_atomic_stub.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2017, Arm Limited and affiliates. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "platform/mbed_atomic.h" + +bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) +{ + return false; +} + +bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue) +{ + return false; +} + +bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue) +{ + return false; +} + + +bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue) +{ + return false; +} + + +uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue) +{ + return 0; +} + +uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue) +{ + return 0; +} + +uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue) +{ + return 0; +} + + +uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) +{ + return 0; +} + +uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) +{ + return 0; +} + +uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) +{ + return 0; +} + + +uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) +{ + return 0; +} + +uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) +{ + return 0; +} + +uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg) +{ + return 0; +} + +uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg) +{ + return 0; +} + +uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg) +{ + return 0; +} + + +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) +{ + return 0; +} + +void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) +{ +} + +uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) +{ + return 0; +} + +bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) +{ + return false; +} + +bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, 
uint64_t desiredValue) +{ + return false; +} + +uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta) +{ + return 0; +} + +uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_and_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_or_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +uint64_t core_util_atomic_fetch_xor_u64(volatile uint64_t *valuePtr, uint64_t arg) +{ + return 0; +} + +/* Similar functions for s32 etc are static inline, but these are extern inline for legacy binary compatibility */ +extern inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); +extern inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +extern inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +extern inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); diff --git a/UNITTESTS/stubs/mbed_critical_stub.c b/UNITTESTS/stubs/mbed_critical_stub.c index b13bf61..f86ccb8 100644 --- a/UNITTESTS/stubs/mbed_critical_stub.c +++ b/UNITTESTS/stubs/mbed_critical_stub.c @@ -24,8 +24,6 @@ #include "platform/mbed_critical.h" #include "platform/mbed_toolchain.h" -static volatile uint32_t critical_section_reentrancy_counter = 0; - bool core_util_are_interrupts_enabled(void) { return false; @@ -48,124 +46,3 @@ void core_util_critical_section_exit(void) { } - -bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) -{ - return false; -} - -bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue) -{ - return false; -} - -bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue) -{ - return false; -} - - -bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue) -{ - return false; -} - - -uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue) -{ - return 0; -} - -uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue) -{ - return 0; -} - -uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue) -{ - return 0; -} - - -uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - return 0; -} - -uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - return 0; -} - -uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - return 0; -} - - -uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - return 0; -} - -uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - return 0; -} - -uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - return 0; -} - - -uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) -{ - return 0; -} - -void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) -{ -} - -uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) -{ - return 0; -} - -bool core_util_atomic_cas_u64(volatile uint64_t *ptr, 
uint64_t *expectedCurrentValue, uint64_t desiredValue) -{ - return false; -} - -uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta) -{ - return 0; -} - -uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) -{ - return 0; -} - - -bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) -{ - return false; -} - -void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue) -{ - return NULL; -} - -void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta) -{ - return NULL; -} - -void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta) -{ - return NULL; -} - diff --git a/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/handles_manager.c b/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/handles_manager.c index c4a5778..dbea860 100644 --- a/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/handles_manager.c +++ b/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/handles_manager.c @@ -19,7 +19,7 @@ #include "psa_defs.h" #include "cmsis_os2.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "spm_internal.h" #include "spm_panic.h" #include "handles_manager.h" diff --git a/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/spm_common.c b/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/spm_common.c index f98a0ba..94e070f 100644 --- a/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/spm_common.c +++ b/components/TARGET_PSA/TARGET_MBED_SPM/COMPONENT_SPE/spm_common.c @@ -16,7 +16,7 @@ */ #include "cmsis_os2.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "psa_defs.h" #include "spm_internal.h" #include "spm_panic.h" diff --git a/components/storage/blockdevice/COMPONENT_DATAFLASH/DataFlashBlockDevice.cpp b/components/storage/blockdevice/COMPONENT_DATAFLASH/DataFlashBlockDevice.cpp index 70cdc44..d6039be 100644 --- a/components/storage/blockdevice/COMPONENT_DATAFLASH/DataFlashBlockDevice.cpp +++ b/components/storage/blockdevice/COMPONENT_DATAFLASH/DataFlashBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "DataFlashBlockDevice.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include diff --git a/components/storage/blockdevice/COMPONENT_FLASHIAP/FlashIAPBlockDevice.cpp b/components/storage/blockdevice/COMPONENT_FLASHIAP/FlashIAPBlockDevice.cpp index 99f5b67..86c8a0b 100644 --- a/components/storage/blockdevice/COMPONENT_FLASHIAP/FlashIAPBlockDevice.cpp +++ b/components/storage/blockdevice/COMPONENT_FLASHIAP/FlashIAPBlockDevice.cpp @@ -17,7 +17,7 @@ #if DEVICE_FLASH #include "FlashIAPBlockDevice.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "mbed_error.h" using namespace mbed; diff --git a/components/wifi/esp8266-driver/ESP8266Interface.cpp b/components/wifi/esp8266-driver/ESP8266Interface.cpp index 9fc7fe1..f690cb1 100644 --- a/components/wifi/esp8266-driver/ESP8266Interface.cpp +++ b/components/wifi/esp8266-driver/ESP8266Interface.cpp @@ -26,7 +26,7 @@ #include "features/netsocket/nsapi_types.h" #include "mbed_trace.h" #include "platform/Callback.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_debug.h" #include "platform/mbed_wait_api.h" diff --git a/features/lorawan/LoRaWANStack.h b/features/lorawan/LoRaWANStack.h index 8d0d182..dbdcac0 100644 --- a/features/lorawan/LoRaWANStack.h +++ b/features/lorawan/LoRaWANStack.h @@ -42,7 +42,7 @@ #include #include "events/EventQueue.h" -#include "platform/mbed_critical.h" +#include 
"platform/mbed_atomic.h" #include "platform/Callback.h" #include "platform/NonCopyable.h" #include "platform/ScopedLock.h" diff --git a/features/netsocket/InternetSocket.cpp b/features/netsocket/InternetSocket.cpp index 4f08bc9..926ba8b 100644 --- a/features/netsocket/InternetSocket.cpp +++ b/features/netsocket/InternetSocket.cpp @@ -15,6 +15,7 @@ */ #include "InternetSocket.h" +#include "platform/mbed_critical.h" #include "platform/Callback.h" using namespace mbed; diff --git a/features/netsocket/InternetSocket.h b/features/netsocket/InternetSocket.h index ab41df9..6f1dc3e 100644 --- a/features/netsocket/InternetSocket.h +++ b/features/netsocket/InternetSocket.h @@ -25,7 +25,7 @@ #include "rtos/Mutex.h" #include "rtos/EventFlags.h" #include "Callback.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "mbed_toolchain.h" #include "SocketStats.h" diff --git a/features/storage/blockdevice/BufferedBlockDevice.cpp b/features/storage/blockdevice/BufferedBlockDevice.cpp index 23ec5db..4eeb7bf 100644 --- a/features/storage/blockdevice/BufferedBlockDevice.cpp +++ b/features/storage/blockdevice/BufferedBlockDevice.cpp @@ -16,7 +16,7 @@ #include "BufferedBlockDevice.h" #include "platform/mbed_assert.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include #include diff --git a/features/storage/blockdevice/ChainingBlockDevice.cpp b/features/storage/blockdevice/ChainingBlockDevice.cpp index c5e31b3..fa9f036 100644 --- a/features/storage/blockdevice/ChainingBlockDevice.cpp +++ b/features/storage/blockdevice/ChainingBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "ChainingBlockDevice.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_assert.h" namespace mbed { diff --git a/features/storage/blockdevice/ExhaustibleBlockDevice.cpp b/features/storage/blockdevice/ExhaustibleBlockDevice.cpp index a19d4bb..aeb5eb5 100644 --- a/features/storage/blockdevice/ExhaustibleBlockDevice.cpp +++ b/features/storage/blockdevice/ExhaustibleBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "ExhaustibleBlockDevice.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_assert.h" namespace mbed { diff --git a/features/storage/blockdevice/FlashSimBlockDevice.cpp b/features/storage/blockdevice/FlashSimBlockDevice.cpp index 04b30a9..6159130 100644 --- a/features/storage/blockdevice/FlashSimBlockDevice.cpp +++ b/features/storage/blockdevice/FlashSimBlockDevice.cpp @@ -16,7 +16,7 @@ #include "FlashSimBlockDevice.h" #include "platform/mbed_assert.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include #include #include diff --git a/features/storage/blockdevice/HeapBlockDevice.cpp b/features/storage/blockdevice/HeapBlockDevice.cpp index c3c682e..8dddb7e 100644 --- a/features/storage/blockdevice/HeapBlockDevice.cpp +++ b/features/storage/blockdevice/HeapBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "HeapBlockDevice.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include #include diff --git a/features/storage/blockdevice/MBRBlockDevice.cpp b/features/storage/blockdevice/MBRBlockDevice.cpp index cf4db6a..1e65305 100644 --- a/features/storage/blockdevice/MBRBlockDevice.cpp +++ b/features/storage/blockdevice/MBRBlockDevice.cpp @@ -15,7 +15,7 @@ */ #include "MBRBlockDevice.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_toolchain.h" #include "platform/mbed_assert.h" #include diff --git 
a/features/storage/nvstore/source/nvstore.cpp b/features/storage/nvstore/source/nvstore.cpp index 41b2603..a503ffd 100644 --- a/features/storage/nvstore/source/nvstore.cpp +++ b/features/storage/nvstore/source/nvstore.cpp @@ -22,7 +22,7 @@ #include "FlashIAP.h" #include "SystemStorage.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #include "mbed_assert.h" #include "mbed_error.h" #include "mbed_wait_api.h" diff --git a/mbed.h b/mbed.h index e247e1d..b66b6b7 100644 --- a/mbed.h +++ b/mbed.h @@ -86,6 +86,7 @@ #include "drivers/InterruptIn.h" #include "platform/mbed_wait_api.h" #include "hal/sleep_api.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_power_mgmt.h" #include "platform/mbed_rtc_time.h" #include "platform/mbed_poll.h" diff --git a/platform/CircularBuffer.h b/platform/CircularBuffer.h index d1b15e7..77e8bb4 100644 --- a/platform/CircularBuffer.h +++ b/platform/CircularBuffer.h @@ -17,6 +17,7 @@ #ifndef MBED_CIRCULARBUFFER_H #define MBED_CIRCULARBUFFER_H +#include #include "platform/mbed_critical.h" #include "platform/mbed_assert.h" diff --git a/platform/DeepSleepLock.h b/platform/DeepSleepLock.h index 37aa983..1fe95db 100644 --- a/platform/DeepSleepLock.h +++ b/platform/DeepSleepLock.h @@ -19,7 +19,7 @@ #include #include "platform/mbed_power_mgmt.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" namespace mbed { diff --git a/platform/SharedPtr.h b/platform/SharedPtr.h index 0a78931..edaa819 100644 --- a/platform/SharedPtr.h +++ b/platform/SharedPtr.h @@ -23,7 +23,7 @@ #include #include -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" namespace mbed { diff --git a/platform/SingletonPtr.h b/platform/SingletonPtr.h index 5cb109e..9d2cc66 100644 --- a/platform/SingletonPtr.h +++ b/platform/SingletonPtr.h @@ -28,7 +28,7 @@ #include #include #include "platform/mbed_assert.h" -#include "platform/mbed_critical.h" +#include "platform/mbed_atomic.h" #ifdef MBED_CONF_RTOS_PRESENT #include "cmsis_os2.h" #endif diff --git a/platform/internal/mbed_atomic_impl.c b/platform/internal/mbed_atomic_impl.c new file mode 100644 index 0000000..5087ae6 --- /dev/null +++ b/platform/internal/mbed_atomic_impl.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2019, ARM Limited, All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "platform/mbed_assert.h" +#include "platform/mbed_atomic.h" +#include "platform/mbed_critical.h" + +/* Inline bool implementations in the header use uint8_t versions to manipulate the bool */ +MBED_STATIC_ASSERT(sizeof(bool) == sizeof(uint8_t), "Surely bool is a byte"); + +/* Inline implementations in the header use uint32_t versions to manipulate pointers */ +MBED_STATIC_ASSERT(sizeof(void *) == sizeof(uint32_t), "Alas, pointers must be 32-bit"); + + +#define DO_MBED_LOCKED_OP(name, OP, retValue, T, fn_suffix) \ +T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \ +{ \ + T oldValue, newValue; \ + core_util_critical_section_enter(); \ + oldValue = *valuePtr; \ + newValue = OP; \ + *valuePtr = newValue; \ + core_util_critical_section_exit(); \ + return retValue; \ +} + +#define DO_MBED_LOCKED_CAS_OP(T, fn_suffix) \ +bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +{ \ + bool success; \ + T currentValue; \ + core_util_critical_section_enter(); \ + currentValue = *ptr; \ + if (currentValue == *expectedCurrentValue) { \ + *ptr = desiredValue; \ + success = true; \ + } else { \ + *expectedCurrentValue = currentValue; \ + success = false; \ + } \ + core_util_critical_section_exit(); \ + return success; \ +} \ + \ +bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, \ + T *expectedCurrentValue, T desiredValue) \ +{ \ + return core_util_atomic_cas_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ +} + +#if MBED_EXCLUSIVE_ACCESS +/* These are the C99 external definitions for the inline functions */ +/* We maintain external definitions rather than using "static inline" for backwards binary compatibility + * and to give the compiler plenty of leeway to choose to not inline in both C and C++ modes + */ + +extern inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr); + +extern inline uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t newValue); +extern inline uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t newValue); +extern inline uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t newValue); +extern inline uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t 
*valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg); +extern inline uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg); +extern inline uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg); +extern inline bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); +extern inline bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); +extern inline bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); +extern inline bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); +extern inline bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); +extern inline bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); + +#else + +bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) +{ + core_util_critical_section_enter(); + uint8_t currentValue = flagPtr->_flag; + flagPtr->_flag = true; + core_util_critical_section_exit(); + return currentValue; +} +#endif + +/* No architecture we support has LDREXD/STREXD, so must always disable IRQs for 64-bit operations */ +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) +{ + core_util_critical_section_enter(); + uint64_t currentValue = *valuePtr; + core_util_critical_section_exit(); + return currentValue; +} + +void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) +{ + core_util_critical_section_enter(); + *valuePtr = desiredValue; + core_util_critical_section_exit(); +} + +/* Now locked operations for whichever we don't have lock-free ones for */ +#if MBED_EXCLUSIVE_ACCESS +/* Just need 64-bit locked operations */ +#define DO_MBED_LOCKED_OPS(name, OP, retValue) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint64_t, u64) +#define DO_MBED_LOCKED_CAS_OPS() \ + DO_MBED_LOCKED_CAS_OP(uint64_t, u64) +#else +/* All the operations are locked */ +#define DO_MBED_LOCKED_OPS(name, OP, retValue) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint8_t, u8) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint16_t, u16) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint32_t, u32) \ + DO_MBED_LOCKED_OP(name, OP, retValue, uint64_t, u64) +#define DO_MBED_LOCKED_CAS_OPS() \ + DO_MBED_LOCKED_CAS_OP(uint8_t, u8) \ + DO_MBED_LOCKED_CAS_OP(uint16_t, u16) \ + DO_MBED_LOCKED_CAS_OP(uint32_t, u32) \ + DO_MBED_LOCKED_CAS_OP(uint64_t, u64) +#endif + +// *INDENT-OFF* +DO_MBED_LOCKED_OPS(exchange, arg, oldValue) +DO_MBED_LOCKED_OPS(incr, oldValue + arg, newValue) +DO_MBED_LOCKED_OPS(decr, oldValue - arg, newValue) +DO_MBED_LOCKED_OPS(fetch_add, oldValue + arg, oldValue) +DO_MBED_LOCKED_OPS(fetch_sub, oldValue - arg, oldValue) +DO_MBED_LOCKED_OPS(fetch_and, oldValue & arg, oldValue) +DO_MBED_LOCKED_OPS(fetch_or, oldValue | arg, oldValue) 
+DO_MBED_LOCKED_OPS(fetch_xor, oldValue ^ arg, oldValue) +DO_MBED_LOCKED_CAS_OPS() +// *INDENT-ON* + +/* Similar functions for s32 etc are static inline, but these are extern inline for legacy binary compatibility */ +extern inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); +extern inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +extern inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +extern inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); diff --git a/platform/internal/mbed_atomic_impl.h b/platform/internal/mbed_atomic_impl.h new file mode 100644 index 0000000..cbabd4b --- /dev/null +++ b/platform/internal/mbed_atomic_impl.h @@ -0,0 +1,1200 @@ + +/* + * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MBED_ATOMIC_IMPL_H__ +#define __MBED_ATOMIC_IMPL_H__ + +#ifndef __MBED_UTIL_ATOMIC_H__ +#error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h" +#endif + +#include +#include "cmsis.h" +#include "platform/mbed_assert.h" +#include "platform/mbed_toolchain.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef MBED_DEBUG +/* Plain loads must not have "release" or "acquire+release" order */ +#define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel) + +/* Plain stores must not have "consume", "acquire" or "acquire+release" order */ +#define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel) + +/* Compare exchange needs failure order no stronger than success, and failure can't be "release" or "acquire+release" */ +#define MBED_CHECK_CAS_ORDER(success, failure) \ + MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel) +#else +#define MBED_CHECK_LOAD_ORDER(order) (void)0 +#define MBED_CHECK_STORE_ORDER(order) (void)0 +#define MBED_CHECK_CAS_ORDER(success, failure) (void)0 +#endif + +/* This is currently just to silence unit tests, so no better test required */ +#ifdef __MBED__ +#define MBED_ATOMIC_PTR_SIZE 32 +#else +#define MBED_ATOMIC_PTR_SIZE 64 +#endif + +/* Place barrier after a load or read-modify-write if a consume or acquire operation */ +#define MBED_ACQUIRE_BARRIER(order) do { \ + if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \ + MBED_BARRIER(); \ + } } while (0) + +/* Place barrier before a store or read-modify-write if a release operation */ +#define MBED_RELEASE_BARRIER(order) do { \ + if ((order) & mbed_memory_order_release) { \ + MBED_BARRIER(); \ + } } while (0) + +/* Place barrier after a plain store if a sequentially consistent */ +#define MBED_SEQ_CST_BARRIER(order) do { \ + if ((order) == 
mbed_memory_order_seq_cst) { \ + MBED_BARRIER(); \ + } } while (0) + + + +#if MBED_EXCLUSIVE_ACCESS + +/* This header file provides C inline definitions for atomic functions. */ +/* For C99 inline semantic compatibility, mbed_atomic_impl.c has out-of-line definitions. */ + +/****************************** ASSEMBLER **********************************/ + +// Fiddle about with constraints. These work for GCC and clang, but +// IAR appears to be restricted to having only a single constraint, +// so we can't do immediates. +#if MBED_EXCLUSIVE_ACCESS_THUMB1 +#define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB +#define MBED_CMP_IMM "I" // CMP 8-bit immediate +#define MBED_SUB3_IMM "L" // -7 to +7 +#else +#define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers +#define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate +#define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate +#endif + +// ARM C 5 inline assembler recommends against using LDREX/STREX +// for same reason as intrinsics, but there's no other way to get +// inlining. ARM C 5 is being retired anyway. + +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \ + __asm { \ + LDREX##M oldValue, [valuePtr] \ + STREX##M fail, newValue, [valuePtr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*valuePtr) \ + : [newValue] "r" (newValue) \ + : \ + ) +#elif defined __ICCARM__ +/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */ +#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail) \ + : [valuePtr] "r" (valuePtr), \ + [newValue] "r" (newValue) \ + : "memory" \ + ) +#endif + +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ + __asm { \ + LDREX##M oldValue, [valuePtr] \ + OP newValue, oldValue, arg \ + STREX##M fail, newValue, [valuePtr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + #OP "\t%[newValue], %[oldValue], %[arg]\n\t" \ + "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ + : [oldValue] "=&" MBED_DOP_REG (oldValue), \ + [newValue] "=&" MBED_DOP_REG (newValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*valuePtr) \ + : [arg] Constants MBED_DOP_REG (arg) \ + : "cc" \ + ) +#elif defined __ICCARM__ +/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */ +#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + #OP "\t%[newValue], %[oldValue], %[arg]\n" \ + "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ + : [oldValue] "=&r" (oldValue), \ + [newValue] "=&r" (newValue), \ + [fail] "=&r" (fail) \ + : [valuePtr] "r" (valuePtr), \ + [arg] "r" (arg) \ + : "memory", "cc" \ + ) +#endif + +/* Bitwise operations are harder to do in ARMv8-M baseline - there + * are only 2-operand versions of the instructions. 
+ */ +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ + __asm { \ + LDREX##M oldValue, [valuePtr] \ + MOV newValue, oldValue \ + OP newValue, arg \ + STREX##M fail, newValue, [valuePtr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "MOV" "\t%[newValue], %[oldValue]\n\t" \ + #OP "\t%[newValue], %[arg]\n\t" \ + "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ + : [oldValue] "=&r" (oldValue), \ + [newValue] "=&l" (newValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*valuePtr) \ + : [arg] Constants "l" (arg) \ + : "cc" \ + ) +#elif defined __ICCARM__ +#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "MOV" "\t%[newValue], %[oldValue]\n" \ + #OP "\t%[newValue], %[arg]\n" \ + "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ + : [oldValue] "=&r" (oldValue), \ + [newValue] "=&r" (newValue), \ + [fail] "=&r" (fail) \ + : [valuePtr] "r" (valuePtr), \ + [arg] "r" (arg) \ + : "memory", "cc" \ + ) +#endif + +/* Note that we split ARM and Thumb implementations for CAS, as + * the key distinction is the handling of conditions. Thumb-2 IT is + * partially deprecated, so avoid it, making Thumb-1 and Thumb-2 + * implementations the same. + */ +#if MBED_EXCLUSIVE_ACCESS_ARM +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + __asm { \ + LDREX##M oldValue, [ptr] \ + SUBS fail, oldValue, expectedValue \ + STREX##M##EQ fail, desiredValue, [ptr] \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ + "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t" \ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail), \ + [value] "+Q" (*ptr) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] "ILr" (expectedValue) \ + : "cc" \ + ) +#elif defined __ICCARM__ +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ + "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] "r" (expectedValue), \ + [valuePtr] "r" (ptr), \ + : "memory", "cc" \ + ) +#endif +#else // MBED_EXCLUSIVE_ACCESS_ARM +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + __asm { \ + LDREX##M oldValue, [ptr] \ + SUBS fail, oldValue, expectedValue \ + BNE done \ + STREX##M fail, desiredValue, [ptr] \ +done: \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + __asm volatile ( \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ + "BNE" "\t%=f\n\t" \ + "STREX"#M "\t%[fail], %[desiredValue], %[value]\n" \ + "%=:" \ + : [oldValue] "=&" MBED_DOP_REG (oldValue), \ + [fail] "=&" MBED_DOP_REG (fail), \ + [value] "+Q" (*ptr) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \ + : "cc" \ + ) +#elif defined __ICCARM__ +#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ + asm volatile ( \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ + "BNE" "\tdone\n\t" \ + "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ + "done:" \ + : 
[oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] "r" (expectedValue), \ + [valuePtr] "r" (ptr) \ + : "memory", "cc" \ + ) +#endif +#endif // MBED_EXCLUSIVE_ACCESS_ARM + +/* For strong CAS, conditional execution is complex enough to + * not be worthwhile, so all implementations look like Thumb-1. + * (This is the operation for which STREX returning 0 for success + * is beneficial.) + */ +#ifdef __CC_ARM +#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \ + __asm { \ + retry: \ + LDREX##M oldValue, [ptr] \ + SUBS fail, oldValue, expectedValue \ + BNE done \ + STREX##M fail, desiredValue, [ptr] \ + CMP fail, #0 \ + BNE retry \ + done: \ + } +#elif defined __clang__ || defined __GNUC__ +#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \ + __asm volatile ( \ + "\n%=:\n\t" \ + "LDREX"#M "\t%[oldValue], %[value]\n\t" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ + "BNE" "\t%=f\n" \ + "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t" \ + "CMP" "\t%[fail], #0\n\t" \ + "BNE" "\t%=b\n" \ + "%=:" \ + : [oldValue] "=&" MBED_DOP_REG (oldValue), \ + [fail] "=&" MBED_DOP_REG (fail), \ + [value] "+Q" (*ptr) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \ + : "cc" \ + ) +#elif defined __ICCARM__ +#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \ + asm volatile ( \ + "retry:\n" \ + "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ + "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ + "BNE" "\tdone\n" \ + "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ + "CMP" "\t%[fail], #0\n" \ + "BNE" "\tretry\n" \ + "done:" \ + : [oldValue] "=&r" (oldValue), \ + [fail] "=&r" (fail) \ + : [desiredValue] "r" (desiredValue), \ + [expectedValue] "r" (expectedValue), \ + [valuePtr] "r" (ptr) \ + : "memory", "cc" \ + ) +#endif + +/********************* LOCK-FREE IMPLEMENTATION MACROS ****************/ + +/* Note care taken with types here. Values which the assembler outputs correctly + * narrowed, or inputs without caring about width, are marked as type T. Other + * values are uint32_t. It's not clear from documentation whether assembler + * assumes anything about widths, but try to signal correctly to get necessary + * narrowing, and avoid unnecessary. + * Tests show that GCC in particular will pass in unnarrowed values - eg passing + * "uint8_t arg = -1" to the assembler as 0xFFFFFFFF. This is fine for, eg, add_u8, + * but wouldn't be for compare_and_exchange_u8. + * On the other hand, it seems to be impossible to stop GCC inserting narrowing + * instructions for the output - it will always put in UXTB for the oldValue of + * an operation. 
+ */ +#define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \ +inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \ +{ \ + T oldValue; \ + uint32_t fail; \ + MBED_BARRIER(); \ + DO_MBED_LOCKFREE_EXCHG_ASM(M); \ + MBED_BARRIER(); \ + return oldValue; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \ + volatile T *valuePtr, T newValue, mbed_memory_order order) \ +{ \ + T oldValue; \ + uint32_t fail; \ + MBED_RELEASE_BARRIER(order); \ + DO_MBED_LOCKFREE_EXCHG_ASM(M); \ + MBED_ACQUIRE_BARRIER(order); \ + return oldValue; \ +} + +#define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \ +inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +{ \ + MBED_BARRIER(); \ + T oldValue; \ + uint32_t fail, expectedValue = *expectedCurrentValue; \ + DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \ + if (fail) { \ + *expectedCurrentValue = oldValue; \ + } \ + MBED_BARRIER(); \ + return !fail; \ +} \ + \ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \ +{ \ + MBED_CHECK_CAS_ORDER(success, failure); \ + MBED_RELEASE_BARRIER(success); \ + T oldValue; \ + uint32_t fail, expectedValue = *expectedCurrentValue; \ + DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \ + if (fail) { \ + *expectedCurrentValue = oldValue; \ + } \ + MBED_ACQUIRE_BARRIER(fail ? failure : success); \ + return !fail; \ +} + +#define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \ +inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +{ \ + MBED_BARRIER(); \ + T oldValue; \ + uint32_t fail, expectedValue = *expectedCurrentValue; \ + DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \ + if (fail) { \ + *expectedCurrentValue = oldValue; \ + } \ + MBED_BARRIER(); \ + return !fail; \ +} \ + \ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \ +{ \ + MBED_CHECK_CAS_ORDER(success, failure); \ + MBED_RELEASE_BARRIER(success); \ + T oldValue; \ + uint32_t fail, expectedValue = *expectedCurrentValue; \ + DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \ + if (fail) { \ + *expectedCurrentValue = oldValue; \ + } \ + MBED_ACQUIRE_BARRIER(fail ? 
failure : success); \ + return !fail; \ +} + + +#define DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, T, fn_suffix, M) \ +inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \ +{ \ + T oldValue; \ + uint32_t fail, newValue; \ + MBED_BARRIER(); \ + do { \ + DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_BARRIER(); \ + return (T) retValue; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *valuePtr, T arg, mbed_memory_order order) \ +{ \ + T oldValue; \ + uint32_t fail, newValue; \ + MBED_RELEASE_BARRIER(order); \ + do { \ + DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_ACQUIRE_BARRIER(order); \ + return (T) retValue; \ +} \ + +#define DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, T, fn_suffix, M) \ +inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \ + T oldValue; \ + uint32_t fail, newValue; \ + MBED_BARRIER(); \ + do { \ + DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_BARRIER(); \ + return (T) retValue; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *valuePtr, T arg, mbed_memory_order order) \ +{ \ + T oldValue; \ + uint32_t fail, newValue; \ + MBED_RELEASE_BARRIER(order); \ + do { \ + DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \ + } while (fail); \ + MBED_ACQUIRE_BARRIER(order); \ + return (T) retValue; \ +} \ + +inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr) +{ + MBED_BARRIER(); + bool oldValue, newValue = true; + uint32_t fail; + do { + DO_MBED_LOCKFREE_EXCHG_ASM(B); + } while (fail); + MBED_BARRIER(); + return oldValue; +} + +MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order) +{ + MBED_RELEASE_BARRIER(order); + bool oldValue, newValue = true; + uint32_t fail; + do { + DO_MBED_LOCKFREE_EXCHG_ASM(B); + } while (fail); + MBED_ACQUIRE_BARRIER(order); + return oldValue; +} + +/********************* LOCK-FREE IMPLEMENTATION DEFINITIONS ****************/ + +#define DO_MBED_LOCKFREE_EXCHG_OPS() \ + DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) \ + DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \ + DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_3OPS(name, OP, Constants, retValue) \ + DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint8_t, u8, B) \ + DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint16_t, u16, H) \ + DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_2OPS(name, OP, Constants, retValue) \ + DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint8_t, u8, B) \ + DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint16_t, u16, H) \ + DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \ + DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \ + DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \ + DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, ) + +#define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \ + DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t, u8, B) \ + DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \ + DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, ) + + +// We always use the "S" form of operations - avoids yet another +// possible unneeded distinction between Thumbv1 and Thumbv2, and +// may reduce code size by allowing 16-bit instructions. 
+#if !MBED_EXCLUSIVE_ACCESS_THUMB1 +// I constraint is 12-bit modified immediate constant +// L constraint is negated 12-bit modified immediate constant +// (relying on assembler to swap ADD/SUB) +// We could permit J (-4095 to +4095) if we used ADD/SUB +// instead of ADDS/SUBS, but then that would block generation +// of the 16-bit forms. Shame we can't specify "don't care" +// for the "S", or get the GNU multi-alternative to +// choose ADDS/ADD appropriately. +DO_MBED_LOCKFREE_3OPS(incr, ADDS, "IL", newValue) +DO_MBED_LOCKFREE_3OPS(decr, SUBS, "IL", newValue) + +DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "IL", oldValue) +DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "IL", oldValue) +// K constraint is inverted 12-bit modified immediate constant +// (relying on assembler substituting BIC for AND) +DO_MBED_LOCKFREE_3OPS(fetch_and, ANDS, "IK", oldValue) +#if MBED_EXCLUSIVE_ACCESS_ARM +// ARM does not have ORN instruction, so take plain immediates. +DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "I", oldValue) +#else +// Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR. +DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "IK", oldValue) +#endif +// I constraint is 12-bit modified immediate operand +DO_MBED_LOCKFREE_3OPS(fetch_xor, EORS, "I", oldValue) +#else // MBED_EXCLUSIVE_ACCESS_THUMB1 +// L constraint is -7 to +7, suitable for 3-op ADD/SUB +// (relying on assembler to swap ADD/SUB) +DO_MBED_LOCKFREE_3OPS(incr, ADDS, "L", newValue) +DO_MBED_LOCKFREE_3OPS(decr, SUBS, "L", newValue) +DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "L", oldValue) +DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "L", oldValue) +DO_MBED_LOCKFREE_2OPS(fetch_and, ANDS, "", oldValue) +DO_MBED_LOCKFREE_2OPS(fetch_or, ORRS, "", oldValue) +DO_MBED_LOCKFREE_2OPS(fetch_xor, EORS, "", oldValue) +#endif + +DO_MBED_LOCKFREE_EXCHG_OPS() +DO_MBED_LOCKFREE_CAS_STRONG_OPS() +DO_MBED_LOCKFREE_CAS_WEAK_OPS() + +#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64) +#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64) +#else // MBED_EXCLUSIVE_ACCESS +/* All the operations are locked, so need no ordering parameters */ +#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t, u8) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \ + DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64) +#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t, u8) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \ + DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64) + +MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order) +{ + return core_util_atomic_flag_test_and_set(valuePtr); +} +#endif // MBED_EXCLUSIVE_ACCESS + +/********************* OPERATIONS THAT ARE ALWAYS LOCK-FREE ****************/ + +/* Lock-free loads and stores don't need assembler - just aligned accesses */ +/* Silly ordering of `T volatile` is because T can be `void *` */ +#define DO_MBED_LOCKFREE_LOADSTORE(T, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const volatile *valuePtr) \ +{ \ + T value = *valuePtr; \ + MBED_BARRIER(); \ + return value; \ +} \ + \ +MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const volatile *valuePtr, mbed_memory_order order) \ +{ \ + MBED_CHECK_LOAD_ORDER(order); \ + T value = 
*valuePtr; \ + MBED_ACQUIRE_BARRIER(order); \ + return value; \ +} \ + \ +MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T volatile *valuePtr, T value) \ +{ \ + MBED_BARRIER(); \ + *valuePtr = value; \ + MBED_BARRIER(); \ +} \ + \ +MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T volatile *valuePtr, T value, mbed_memory_order order) \ +{ \ + MBED_CHECK_STORE_ORDER(order); \ + MBED_RELEASE_BARRIER(order); \ + *valuePtr = value; \ + MBED_SEQ_CST_BARRIER(order); \ +} + +MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr) +{ + MBED_BARRIER(); + flagPtr->_flag = false; + MBED_BARRIER(); +} + +MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order) +{ + MBED_CHECK_STORE_ORDER(order); + MBED_RELEASE_BARRIER(order); + flagPtr->_flag = false; + MBED_SEQ_CST_BARRIER(order); +} +DO_MBED_LOCKFREE_LOADSTORE(uint8_t, u8) +DO_MBED_LOCKFREE_LOADSTORE(uint16_t, u16) +DO_MBED_LOCKFREE_LOADSTORE(uint32_t, u32) +DO_MBED_LOCKFREE_LOADSTORE(int8_t, s8) +DO_MBED_LOCKFREE_LOADSTORE(int16_t, s16) +DO_MBED_LOCKFREE_LOADSTORE(int32_t, s32) +DO_MBED_LOCKFREE_LOADSTORE(bool, bool) +DO_MBED_LOCKFREE_LOADSTORE(void *, ptr) + + +/********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/ + +MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr) +{ + return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr); +} + +MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue) +{ + core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); +} + +#define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \ +MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \ + T *expectedCurrentValue, T desiredValue) \ +{ \ + return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \ + (u##T *)expectedCurrentValue, (u##T)desiredValue); \ +} \ + \ +MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \ + T *expectedCurrentValue, T desiredValue, \ + mbed_memory_order success, mbed_memory_order failure) \ +{ \ + return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \ + (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \ +} + +#define DO_MBED_SIGNED_CAS_OPS(name) \ + DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \ + DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \ + DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \ + DO_MBED_SIGNED_CAS_OP(name, int64_t, 64) + +DO_MBED_SIGNED_CAS_OPS(cas) +DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak) + +MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue) +{ + return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue); +} + +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure) +{ + return core_util_atomic_cas_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure); +} + +inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return core_util_atomic_cas_u32( + (volatile uint32_t *)ptr, + (uint32_t *)expectedCurrentValue, + (uint32_t)desiredValue); +#else + return core_util_atomic_cas_u64( + (volatile 
uint64_t *)ptr, + (uint64_t *)expectedCurrentValue, + (uint64_t)desiredValue); +#endif +} + +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return core_util_atomic_cas_explicit_u32( + (volatile uint32_t *)ptr, + (uint32_t *)expectedCurrentValue, + (uint32_t)desiredValue, + success, failure); +#else + return core_util_atomic_cas_explicit_u64( + (volatile uint64_t *)ptr, + (uint64_t *)expectedCurrentValue, + (uint64_t)desiredValue, + success, failure); +#endif +} + +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue) +{ + return core_util_atomic_compare_exchange_weak_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue); +} + +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure) +{ + return core_util_atomic_compare_exchange_weak_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure); +} + +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return core_util_atomic_compare_exchange_weak_u32( + (volatile uint32_t *)ptr, + (uint32_t *)expectedCurrentValue, + (uint32_t)desiredValue); +#else + return core_util_atomic_compare_exchange_weak_u64( + (volatile uint64_t *)ptr, + (uint64_t *)expectedCurrentValue, + (uint64_t)desiredValue); +#endif +} + +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return core_util_atomic_compare_exchange_weak_explicit_u32( + (volatile uint32_t *)ptr, + (uint32_t *)expectedCurrentValue, + (uint32_t)desiredValue, + success, failure); +#else + return core_util_atomic_compare_exchange_weak_explicit_u64( + (volatile uint64_t *)ptr, + (uint64_t *)expectedCurrentValue, + (uint64_t)desiredValue, + success, failure); +#endif +} + +#define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \ +{ \ + return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \ +} + +#define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \ +{ \ + return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \ +} + +#define DO_MBED_SIGNED_FETCH_OPS(name) \ + DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \ + DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \ + DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \ + DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64) + +#define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \ + DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \ + DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \ + DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \ + DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64) + +DO_MBED_SIGNED_FETCH_OPS(exchange) +DO_MBED_SIGNED_FETCH_OPS(incr) +DO_MBED_SIGNED_FETCH_OPS(decr) +DO_MBED_SIGNED_FETCH_OPS(fetch_add) 
+DO_MBED_SIGNED_FETCH_OPS(fetch_sub) + +DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange) +DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add) +DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub) + +MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue) +{ + return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue); +} + +MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order) +{ + return (bool)core_util_atomic_exchange_explicit_u8((volatile uint8_t *)valuePtr, desiredValue, order); +} + +inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue); +#else + return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order); +#else + return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order); +#endif +} + +inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); +#else + return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); +#endif +} + +inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); +#else + return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg); +#else + return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order); +#else + return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg); +#else + return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg); +#endif +} + +MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) +{ +#if MBED_ATOMIC_PTR_SIZE == 32 + return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order); +#else + return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order); +#endif +} + 
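[Editor's note - illustrative sketch, not part of the patch.] The pointer wrappers above are the building blocks for simple lock-free structures in plain C. A minimal example of pushing onto a lock-free singly-linked stack with the weak compare-exchange wrapper; the node type and stack_head variable are hypothetical:
~~~
struct node {
    struct node *next;
};

static struct node *volatile stack_head;

static void stack_push(struct node *n)
{
    void *old_head = core_util_atomic_load_ptr((void *const volatile *) &stack_head);
    do {
        /* Link the new node in front of the currently observed head. */
        n->next = (struct node *) old_head;
        /* On failure, old_head is reloaded with the current head and we retry. */
    } while (!core_util_atomic_compare_exchange_weak_ptr((void *volatile *) &stack_head,
                                                         &old_head, n));
}
~~~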
+/***************** DUMMY EXPLICIT ORDERING FOR LOCKED OPS *****************/ + +/* Need to throw away the ordering information for all locked operations */ +MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order) +{ + MBED_CHECK_LOAD_ORDER(order); + return core_util_atomic_load_u64(valuePtr); +} + +MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order) +{ + MBED_CHECK_LOAD_ORDER(order); + return core_util_atomic_load_s64(valuePtr); +} + +MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order) +{ + MBED_CHECK_STORE_ORDER(order); + core_util_atomic_store_u64(valuePtr, desiredValue); +} + +MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order) +{ + MBED_CHECK_STORE_ORDER(order); + core_util_atomic_store_s64(valuePtr, desiredValue); +} + +#define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \ +MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \ +{ \ + return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \ +} + +#define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \ +MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \ + volatile T *ptr, T *expectedCurrentValue, T desiredValue, \ + MBED_UNUSED mbed_memory_order success, \ + MBED_UNUSED mbed_memory_order failure) \ +{ \ + MBED_CHECK_CAS_ORDER(success, failure); \ + return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ +} + +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or) +DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor) +DO_MBED_LOCKED_CAS_ORDERINGS(cas) +DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak) + +#ifdef __cplusplus +} // extern "C" + +/***************** TEMPLATE IMPLEMENTATIONS *****************/ + +/* Each of these groups provides specialisations for the T template for each of + * the small types (there is no base implementation), and the base implementation + * of the T * template. 
+ */ +#define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \ +template<> \ +inline T core_util_atomic_load(const volatile T *valuePtr) \ +{ \ + return core_util_atomic_load_##fn_suffix(valuePtr); \ +} \ + \ +template<> \ +inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \ +{ \ + return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \ +} + +template<typename T> +inline T *core_util_atomic_load(T *const volatile *valuePtr) +{ + return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr); +} + +template<typename T> +inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) +{ + return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order); +} + +DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8) +DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16) +DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32) +DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64) +DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8) +DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16) +DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32) +DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64) +DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool) + +#define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \ +template<> \ +inline void core_util_atomic_store(volatile T *valuePtr, T val) \ +{ \ + core_util_atomic_store_##fn_suffix(valuePtr, val); \ +} \ + \ +template<> \ +inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \ +{ \ + core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \ +} + +template<typename T> +inline void core_util_atomic_store(T *volatile *valuePtr, T *val) +{ + core_util_atomic_store_ptr((void *volatile *) valuePtr, val); +} + +template<typename T> +inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) +{ + core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order); +} + +DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8) +DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16) +DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32) +DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64) +DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8) +DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16) +DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32) +DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64) +DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool) + +#define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \ +template<> inline \ +bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ +{ \ + return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ +} + +template<typename T> +inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) +{ + return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue); +} + +template<typename T> +inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) +{ + return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue); +} + +#define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \ +
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \ + DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool) + +DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas) +DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak) + +#define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \ +template<> \ +inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \ +{ \ + return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \ +} \ + \ +template<> \ +inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \ + mbed_memory_order order) \ +{ \ + return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \ +} + + +template<> +inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) +{ + return core_util_atomic_exchange_bool(valuePtr, arg); +} + +template<> +inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) +{ + return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order); +} + +template<typename T> +inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) +{ + return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg); +} + +template<typename T> +inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) +{ + return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order); +} + +template<typename T> +inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) +{ + return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T)); +} + +template<typename T> +inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) +{ + return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order); +} + +template<typename T> +inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) +{ + return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T)); +} + +template<typename T> +inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) +{ + return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order); +} + + +#define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64) + +#define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \ + DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64) + +DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange) +DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange) +DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add) +DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add) +DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub) +DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub) +DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and) +DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or) +DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor) + +#endif // __cplusplus + +#undef MBED_DOP_REG +#undef MBED_CMP_IMM +#undef MBED_SUB3_IMM +#undef DO_MBED_LOCKFREE_EXCHG_ASM +#undef DO_MBED_LOCKFREE_3OP_ASM +#undef DO_MBED_LOCKFREE_2OP_ASM +#undef DO_MBED_LOCKFREE_CAS_WEAK_ASM +#undef DO_MBED_LOCKFREE_CAS_STRONG_ASM +#undef DO_MBED_LOCKFREE_LOADSTORE +#undef DO_MBED_LOCKFREE_EXCHG_OP +#undef DO_MBED_LOCKFREE_CAS_WEAK_OP +#undef 
DO_MBED_LOCKFREE_CAS_STRONG_OP +#undef DO_MBED_LOCKFREE_2OP +#undef DO_MBED_LOCKFREE_3OP +#undef DO_MBED_LOCKFREE_EXCHG_OPS +#undef DO_MBED_LOCKFREE_2OPS +#undef DO_MBED_LOCKFREE_3OPS +#undef DO_MBED_LOCKFREE_CAS_WEAK_OPS +#undef DO_MBED_LOCKFREE_CAS_STRONG_OPS +#undef DO_MBED_SIGNED_CAS_OP +#undef DO_MBED_SIGNED_CAS_OPS +#undef DO_MBED_SIGNED_FETCH_OP +#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP +#undef DO_MBED_SIGNED_FETCH_OPS +#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS +#undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS +#undef DO_MBED_LOCKED_CAS_ORDERINGS +#undef MBED_ACQUIRE_BARRIER +#undef MBED_RELEASE_BARRIER +#undef MBED_SEQ_CST_BARRIER +#undef DO_MBED_ATOMIC_LOAD_TEMPLATE +#undef DO_MBED_ATOMIC_STORE_TEMPLATE +#undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE +#undef DO_MBED_ATOMIC_CAS_TEMPLATE +#undef DO_MBED_ATOMIC_CAS_TEMPLATES +#undef DO_MBED_ATOMIC_FETCH_TEMPLATE +#undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES +#undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES + +#endif diff --git a/platform/mbed_atomic.h b/platform/mbed_atomic.h new file mode 100644 index 0000000..4852c9b --- /dev/null +++ b/platform/mbed_atomic.h @@ -0,0 +1,981 @@ + +/* + * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MBED_UTIL_ATOMIC_H__ +#define __MBED_UTIL_ATOMIC_H__ + +#include "cmsis.h" + +#include <stdbool.h> +#include <stdint.h> +#include <stddef.h> +#include "mbed_toolchain.h" + +/** \addtogroup platform */ +/** @{*/ + +/** + * \defgroup platform_atomic atomic functions + * + * Atomic functions behave analogously to C11 and C++11 - loads have + * acquire semantics, stores have release semantics, and atomic operations + * are sequentially consistent. Atomicity is enforced both between threads and + * interrupt handlers. + * + * @{ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Memory order constraints for atomic operations. Intended semantics + * are as per C++11.
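+ *
+ * (Editor's illustrative note, not part of the original patch.) The relaxed
+ * order is useful where only atomicity, not ordering, is needed - for example
+ * bumping a hypothetical statistics counter of type volatile uint32_t:
+ * ~~~
+ *     core_util_atomic_fetch_add_explicit_u32(&stats_counter, 1, mbed_memory_order_relaxed);
+ * ~~~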
+ */ +typedef enum mbed_memory_order { + /* Bits 0 = consume + * 1 = acquire (explicitly requested, or implied by seq.cst) + * 2 = release (explicitly requested, or implied by seq.cst) + * 4 = sequentially consistent + */ + mbed_memory_order_relaxed = 0x00, + mbed_memory_order_consume = 0x01, + mbed_memory_order_acquire = 0x02, + mbed_memory_order_release = 0x04, + mbed_memory_order_acq_rel = 0x06, + mbed_memory_order_seq_cst = 0x16 +} mbed_memory_order; + +// if __EXCLUSIVE_ACCESS rtx macro not defined, we need to get this via own-set architecture macros +#ifndef MBED_EXCLUSIVE_ACCESS +#ifndef __EXCLUSIVE_ACCESS +#if defined __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH +#if ((__ARM_ARCH_7M__ == 1U) || \ + (__ARM_ARCH_7EM__ == 1U) || \ + (__ARM_ARCH_8M_BASE__ == 1U) || \ + (__ARM_ARCH_8M_MAIN__ == 1U)) || \ + (__ARM_ARCH_7A__ == 1U) +#define MBED_EXCLUSIVE_ACCESS 1U +#define MBED_EXCLUSIVE_ACCESS_THUMB1 (__ARM_ARCH_8M_BASE__ == 1U) +#ifdef __ICCARM__ +#if __CPU_MODE__ == 2 +#define MBED_EXCLUSIVE_ACCESS_ARM 1U +#else +#define MBED_EXCLUSIVE_ACCESS_ARM 0U +#endif +#else +#if !defined (__thumb__) +#define MBED_EXCLUSIVE_ACCESS_ARM 1U +#else +#define MBED_EXCLUSIVE_ACCESS_ARM 0U +#endif +#endif +#elif (__ARM_ARCH_6M__ == 1U) +#define MBED_EXCLUSIVE_ACCESS 0U +#else +#error "Unknown ARM architecture for exclusive access" +#endif // __ARM_ARCH_xxx +#else // __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH +// Seem to be compiling for non-ARM, so stick with critical section implementations +#define MBED_EXCLUSIVE_ACCESS 0U +#endif +#else +#define MBED_EXCLUSIVE_ACCESS __EXCLUSIVE_ACCESS +#endif +#endif + +#if MBED_EXCLUSIVE_ACCESS +#define MBED_INLINE_IF_EX inline +#else +#define MBED_INLINE_IF_EX +#endif + +/** + * A lock-free, primitive atomic flag. + * + * Emulate C11's atomic_flag. The flag is initially in an indeterminate state + * unless explicitly initialized with CORE_UTIL_ATOMIC_FLAG_INIT. + */ +typedef struct core_util_atomic_flag { + uint8_t _flag; +} core_util_atomic_flag; + +/** + * Initializer for a core_util_atomic_flag. + * + * Example: + * ~~~ + * core_util_atomic_flag in_progress = CORE_UTIL_ATOMIC_FLAG_INIT; + * ~~~ + */ +#define CORE_UTIL_ATOMIC_FLAG_INIT { 0 } + +/** + * Atomic test and set. + * + * Atomically tests then sets the flag to true, returning the previous value. + * + * @param flagPtr Target flag being tested and set. + * @return The previous value. + */ +MBED_INLINE_IF_EX bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr); + +/** \ copydoc core_util_atomic_flag_test_and_set + * @param order memory ordering constraint + */ +MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order); + +/** + * Atomic clear. + * + * @param flagPtr Target flag being cleared. + */ +MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr); + +/** \ copydoc core_util_atomic_flag_clear + * @param order memory ordering constraint + */ +MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order); + + +/** + * Atomic compare and set. It compares the contents of a memory location to a + * given value and, only if they are the same, modifies the contents of that + * memory location to a given new value. This is done as a single atomic + * operation. 
The atomicity guarantees that the new value is calculated based on + * up-to-date information; if the value had been updated by another thread in + * the meantime, the write would fail due to a mismatched expectedCurrentValue. + * + * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect + * you to the article on compare-and-swap]. + * + * @param ptr The target memory location. + * @param[in,out] expectedCurrentValue A pointer to some location holding the + * expected current value of the data being set atomically. + * The computed 'desiredValue' should be a function of this current value. + * @note: This is an in-out parameter. In the + * failure case of atomic_cas (where the + * destination isn't set), the pointee of expectedCurrentValue is + * updated with the current value. + * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'. + * + * @return true if the memory location was atomically + * updated with the desired value (after verifying + * that it contained the expectedCurrentValue), + * false otherwise. In the failure case, + * expectedCurrentValue is updated with the new + * value of the target memory location. + * + * pseudocode: + * function cas(p : pointer to int, old : pointer to int, new : int) returns bool { + * if *p != *old { + * *old = *p + * return false + * } + * *p = new + * return true + * } + * + * @note: In the failure case (where the destination isn't set), the value + * pointed to by expectedCurrentValue is instead updated with the current value. + * This property helps write concise code for the following incr: + * + * function incr(p : pointer to int, a : int) returns int { + * done = false + * value = atomic_load(p) + * while not done { + * done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success + * } + * return value + a + * } + * + * However, if the call is made in a loop like this, the atomic_compare_exchange_weak + * functions are to be preferred. + * + * @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it + * always succeeds if the current value is expected, as per the pseudocode + * above; it will not spuriously fail as "atomic_compare_exchange_weak" may. + * This call would normally be used when a fail return does not retry.
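+ *
+ * (Editor's illustrative sketch, not part of the original patch.) The incr
+ * pseudocode above maps onto the C API roughly as follows, for a hypothetical
+ * volatile uint32_t counter:
+ * ~~~
+ *     uint32_t value = core_util_atomic_load_u32(&counter);
+ *     while (!core_util_atomic_cas_u32(&counter, &value, value + 1)) {
+ *         // on failure, value has been reloaded with the current contents of counter
+ *     }
+ * ~~~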
+ */ +MBED_INLINE_IF_EX bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); + +/** \copydoc core_util_atomic_cas_u8 + * @param success memory ordering constraint for successful exchange + * @param failure memory ordering constraint for failure + */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_INLINE_IF_EX bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_INLINE_IF_EX bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ 
+MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_cas_u8 */ +inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); + +/** \copydoc core_util_atomic_cas_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure); + + + +/** + * Atomic compare and set. It compares the contents of a memory location to a + * given value and, only if they are the same, modifies the contents of that + * memory location to a given new value. This is done as a single atomic + * operation. The atomicity guarantees that the new value is calculated based on + * up-to-date information; if the value had been updated by another thread in + * the meantime, the write would fail due to a mismatched expectedCurrentValue. + * + * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect + * you to the article on compare-and-swap]. + * + * @param ptr The target memory location. + * @param[in,out] expectedCurrentValue A pointer to some location holding the + * expected current value of the data being set atomically. + * The computed 'desiredValue' should be a function of this current value. + * @note: This is an in-out parameter. In the + * failure case of atomic_cas (where the + * destination isn't set), the pointee of expectedCurrentValue is + * updated with the current value. + * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'. + * + * @return true if the memory location was atomically + * updated with the desired value (after verifying + * that it contained the expectedCurrentValue), + * false otherwise. In the failure case, + * expectedCurrentValue is updated with the new + * value of the target memory location. + * + * pseudocode: + * function cas(p : pointer to int, old : pointer to int, new : int) returns bool { + * if *p != *old or spurious failure { + * *old = *p + * return false + * } + * *p = new + * return true + * } + * + * @note: In the failure case (where the destination isn't set), the value + * pointed to by expectedCurrentValue is instead updated with the current value. + * This property helps write concise code for the following incr: + * + * function incr(p : pointer to int, a : int) returns int { + * done = false + * value = *p // This fetch operation need not be atomic. + * while not done { + * done = atomic_compare_exchange_weak(p, &value, value + a) // *value gets updated automatically until success + * } + * return value + a + * } + * + * @note: This corresponds to the C11 "atomic_compare_exchange_weak" - it + * may spuriously fail even if the current value is expected, as per the pseudocode + * above, unlike "atomic_compare_exchange_strong", which does not fail spuriously. + * This call would normally be used when a fail return will cause a retry anyway, + * saving the need for an extra loop inside the cas operation.
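+ *
+ * (Editor's illustrative sketch, not part of the original patch.) A typical
+ * retry loop with the weak form, raising a hypothetical volatile uint32_t
+ * maximum to a new sample:
+ * ~~~
+ *     uint32_t seen = core_util_atomic_load_u32(&max_seen);
+ *     while (sample > seen &&
+ *            !core_util_atomic_compare_exchange_weak_u32(&max_seen, &seen, sample)) {
+ *         // seen now holds the latest value of max_seen; re-test and retry
+ *     }
+ * ~~~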
+ */ +MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 + * @param success memory ordering constraint for successful exchange + * @param failure memory ordering constraint for failure + */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s64(volatile 
int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure); + +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); + +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure); + + +/** + * Atomic load. + * @param valuePtr Target memory location. + * @return The loaded value. + */ +MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr); + +/** + * \copydoc core_util_atomic_load_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_load_explicit_u8(const volatile uint8_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_load_explicit_u16(const volatile uint16_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_load_explicit_u32(const volatile uint32_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_load_explicit_s8(const volatile int8_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_load_explicit_s16(const volatile int16_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_load_s32(const volatile int32_t *valuePtr); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_load_explicit_s32(const volatile int32_t *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ 
+MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE bool core_util_atomic_load_explicit_bool(const volatile bool *valuePtr, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr); + +/** \copydoc core_util_atomic_load_u8 */ +MBED_FORCEINLINE void *core_util_atomic_load_explicit_ptr(void *const volatile *valuePtr, mbed_memory_order order); + +/** + * Atomic store. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + */ +MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue); + +/** + * \copydoc core_util_atomic_store_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_s8(volatile int8_t *valuePtr, int8_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile 
int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_store_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue); + +/** \copydoc core_util_atomic_store_explicit_u8 */ +MBED_FORCEINLINE void core_util_atomic_store_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order); + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue); + +/** \copydoc core_util_atomic_exchange_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_exchange_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_exchange_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_exchange_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_exchange_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_exchange_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_exchange_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_exchange_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ 
+MBED_FORCEINLINE int64_t core_util_atomic_exchange_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order); + +/** \copydoc core_util_atomic_exchange_u8 */ +inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); + +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order); + +/** + * Atomic increment. + * @param valuePtr Target memory location being incremented. + * @param delta The amount being incremented. + * @return The new incremented value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta); + +/** \copydoc core_util_atomic_incr_u8 */ +inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta); + +/** + * Atomic decrement. + * @param valuePtr Target memory location being decremented. + * @param delta The amount being decremented. + * @return The new decremented value. 
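+ *
+ * (Editor's illustrative sketch, not part of the original patch.) Because the
+ * new value is returned, decrement suits reference counting; for a hypothetical
+ * volatile uint32_t refcount:
+ * ~~~
+ *     if (core_util_atomic_decr_u32(&refcount, 1) == 0) {
+ *         // the last reference was just released
+ *     }
+ * ~~~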
+ */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_decr_s64(volatile int64_t *valuePtr, int64_t delta); + +/** \copydoc core_util_atomic_decr_u8 */ +inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta); + +/** + * Atomic add. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the addition. + * @return The original value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_add_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_add_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_add_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_add_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_add_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_s8(volatile int8_t *valuePtr, int8_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_s16(volatile int16_t *valuePtr, int16_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_s32(volatile int32_t *valuePtr, int32_t arg); + +/** \copydoc 
core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_s64(volatile int64_t *valuePtr, int64_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_add_u8 */ +MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg); + +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); + +/** + * Atomic subtract. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the subtraction. + * @return The original value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_sub_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_sub_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_sub_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_sub_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_sub_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_s8(volatile int8_t *valuePtr, int8_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_fetch_sub_s16(volatile int16_t *valuePtr, int16_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE int16_t core_util_atomic_fetch_sub_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_s32(volatile int32_t *valuePtr, int32_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_s64(volatile int64_t *valuePtr, int64_t arg); + +/** \copydoc 
core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_sub_u8 */ +MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg); + +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); + +/** + * Atomic bitwise and. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the bitwise operation. + * @return The original value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_and_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_and_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_and_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_and_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_and_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_and_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_and_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_and_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_and_u8 */ +uint64_t core_util_atomic_fetch_and_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_and_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_and_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +/** + * Atomic bitwise inclusive or. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the bitwise operation. + * @return The original value. 
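+ *
+ * (Editor's illustrative sketch, not part of the original patch.) fetch_or is a
+ * natural fit for event-flag bitmasks; event_flags and EVENT_RX_DONE below are
+ * hypothetical:
+ * ~~~
+ *     uint32_t previous = core_util_atomic_fetch_or_u32(&event_flags, EVENT_RX_DONE);
+ *     if (!(previous & EVENT_RX_DONE)) {
+ *         // this call was the one that actually set the bit
+ *     }
+ * ~~~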
+ */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_or_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_or_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_or_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_or_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_or_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_or_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_or_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_or_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_or_u8 */ +uint64_t core_util_atomic_fetch_or_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_or_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_or_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +/** + * Atomic bitwise exclusive or. + * @param valuePtr Target memory location being modified. + * @param arg The argument for the bitwise operation. + * @return The original value. + */ +MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg); + +/** \copydoc core_util_atomic_fetch_xor_u8 + * @param order memory ordering constraint + */ +MBED_FORCEINLINE uint8_t core_util_atomic_fetch_xor_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_xor_u8 */ +MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg); + +/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */ +MBED_FORCEINLINE uint16_t core_util_atomic_fetch_xor_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_xor_u8 */ +MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg); + +/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */ +MBED_FORCEINLINE uint32_t core_util_atomic_fetch_xor_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_fetch_xor_u8 */ +uint64_t core_util_atomic_fetch_xor_u64(volatile uint64_t *valuePtr, uint64_t arg); + +/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */ +MBED_FORCEINLINE uint64_t core_util_atomic_fetch_xor_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order); + +#ifdef __cplusplus +} // extern "C" + +// For each operation, two overloaded templates: +// * one for non-pointer types, which has implementations based on the +// u8/u16/u32/u64/s8/s16/s32/s64/bool functions above. No base implementation. +// * one for any pointer type, generically implemented based on ptr function above. +// +// Templates use standard C/C++ naming - old incr/decr/cas forms are not provided. +// +// Note that C++ template selection somewhat inhibits the ease of use of these templates. +// Ambiguities arise with setting pointers to NULL, or adding constants to integers. 
+// It may be necessary to cast the argument or desired value to the correct type, or +// explicitly specify the type - e.g. core_util_atomic_store(&fh, NULL) or +// core_util_atomic_store(&val, (uint8_t)1). +// A proper mbed::Atomic class would solve the issue. + +/** \copydoc core_util_atomic_load_u8 */ +template<typename T> T core_util_atomic_load(const volatile T *valuePtr); +/** \copydoc core_util_atomic_store_u8 */ +template<typename T> void core_util_atomic_store(volatile T *valuePtr, T desiredValue); +/** \copydoc core_util_atomic_exchange_u8 */ +template<typename T> T core_util_atomic_exchange(volatile T *ptr, T desiredValue); +/** \copydoc core_util_atomic_cas_u8 */ +template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, T *expectedCurrentValue, T desiredValue); +/** \copydoc core_util_atomic_compare_exchange_weak_u8 */ +template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, T *expectedCurrentValue, T desiredValue); +/** \copydoc core_util_atomic_fetch_add_u8 */ +template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, T arg); +/** \copydoc core_util_atomic_fetch_sub_u8 */ +template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, T arg); +/** \copydoc core_util_atomic_fetch_and_u8 */ +template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, T arg); +/** \copydoc core_util_atomic_fetch_or_u8 */ +template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, T arg); +/** \copydoc core_util_atomic_fetch_xor_u8 */ +template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg); + +/** \copydoc core_util_atomic_load_explicit_u8 */ +template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order); +/** \copydoc core_util_atomic_store_explicit_u8 */ +template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, T desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_exchange_explicit_u8 */ +template<typename T> T core_util_atomic_exchange_explicit(volatile T *ptr, T desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_cas_explicit_u8 */ +template<typename T> bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure); +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */ +template<typename T> bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure); +/** \copydoc core_util_atomic_fetch_add_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_and_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_or_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */ +template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); + +/** \copydoc core_util_atomic_load_ptr */ +template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr); +/** \copydoc core_util_atomic_store_ptr */ +template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, T *desiredValue); +/** \copydoc core_util_atomic_exchange_ptr */ +template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *desiredValue);
+/** \copydoc core_util_atomic_cas_ptr */ +template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue); +/** \copydoc core_util_atomic_compare_exchange_weak_ptr */ +template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue); +/** \copydoc core_util_atomic_fetch_add_ptr */ +template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg); +/** \copydoc core_util_atomic_fetch_sub_ptr */ +template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg); + +/** \copydoc core_util_atomic_load_explicit_ptr */ +template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order); +/** \copydoc core_util_atomic_store_explicit_ptr */ +template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_exchange_explicit_ptr */ +template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order); +/** \copydoc core_util_atomic_cas_explicit_ptr */ +template<typename T> inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure); +/** \copydoc core_util_atomic_compare_exchange_weak_explicit_ptr */ +template<typename T> inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure); +/** \copydoc core_util_atomic_fetch_add_explicit_ptr */ +template<typename T> inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); +/** \copydoc core_util_atomic_fetch_sub_explicit_ptr */ +template<typename T> inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order); + +#endif // __cplusplus + +/**@}*/ + +/**@}*/ + +/* Hide the implementation away */ +#include "platform/internal/mbed_atomic_impl.h" + +#endif // __MBED_UTIL_ATOMICL_H__ + + + diff --git a/platform/mbed_critical.c b/platform/mbed_critical.c index 8b84773..4fe7e6a 100644 --- a/platform/mbed_critical.c +++ b/platform/mbed_critical.c @@ -24,25 +24,6 @@ #include "platform/mbed_critical.h" #include "platform/mbed_toolchain.h" -// if __EXCLUSIVE_ACCESS rtx macro not defined, we need to get this via own-set architecture macros -#ifndef MBED_EXCLUSIVE_ACCESS -#ifndef __EXCLUSIVE_ACCESS -#if ((__ARM_ARCH_7M__ == 1U) || \ - (__ARM_ARCH_7EM__ == 1U) || \ - (__ARM_ARCH_8M_BASE__ == 1U) || \ - (__ARM_ARCH_8M_MAIN__ == 1U)) || \ - (__ARM_ARCH_7A__ == 1U) -#define MBED_EXCLUSIVE_ACCESS 1U -#elif (__ARM_ARCH_6M__ == 1U) -#define MBED_EXCLUSIVE_ACCESS 0U -#else -#error "Unknown architecture for exclusive access" -#endif -#else -#define MBED_EXCLUSIVE_ACCESS __EXCLUSIVE_ACCESS -#endif -#endif - static uint32_t critical_section_reentrancy_counter = 0; bool core_util_are_interrupts_enabled(void) @@ -99,413 +80,3 @@ hal_critical_section_exit(); } } - -/* Inline bool implementations in the header use uint8_t versions to manipulate the bool */ -MBED_STATIC_ASSERT(sizeof(bool) == sizeof(uint8_t), "Surely bool is a byte"); - -#if MBED_EXCLUSIVE_ACCESS - -/* Supress __ldrex and __strex deprecated warnings - "#3731-D: intrinsic is deprecated" */ -#if defined (__CC_ARM) -#pragma diag_suppress 3731 -#endif - -bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) -{
- MBED_BARRIER(); - uint8_t currentValue; - do { - currentValue = __LDREXB(&flagPtr->_flag); - } while (__STREXB(true, &flagPtr->_flag)); - MBED_BARRIER(); - return currentValue; -} - -bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue) -{ - MBED_BARRIER(); - do { - uint8_t currentValue = __LDREXB(ptr); - if (currentValue != *expectedCurrentValue) { - *expectedCurrentValue = currentValue; - __CLREX(); - return false; - } - } while (__STREXB(desiredValue, ptr)); - MBED_BARRIER(); - return true; -} - -bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue) -{ - MBED_BARRIER(); - do { - uint16_t currentValue = __LDREXH(ptr); - if (currentValue != *expectedCurrentValue) { - *expectedCurrentValue = currentValue; - __CLREX(); - return false; - } - } while (__STREXH(desiredValue, ptr)); - MBED_BARRIER(); - return true; -} - - -bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue) -{ - MBED_BARRIER(); - do { - uint32_t currentValue = __LDREXW(ptr); - if (currentValue != *expectedCurrentValue) { - *expectedCurrentValue = currentValue; - __CLREX(); - return false; - } - } while (__STREXW(desiredValue, ptr)); - MBED_BARRIER(); - return true; -} - -uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue) -{ - MBED_BARRIER(); - uint8_t currentValue; - do { - currentValue = __LDREXB(valuePtr); - } while (__STREXB(desiredValue, valuePtr)); - MBED_BARRIER(); - return currentValue; -} - -uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue) -{ - MBED_BARRIER(); - uint16_t currentValue; - do { - currentValue = __LDREXH(valuePtr); - } while (__STREXH(desiredValue, valuePtr)); - MBED_BARRIER(); - return currentValue; -} - -uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue) -{ - MBED_BARRIER(); - uint32_t currentValue; - do { - currentValue = __LDREXW(valuePtr); - } while (__STREXW(desiredValue, valuePtr)); - MBED_BARRIER(); - return currentValue; -} - -uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - MBED_BARRIER(); - uint8_t newValue; - do { - newValue = __LDREXB(valuePtr) + delta; - } while (__STREXB(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - MBED_BARRIER(); - uint16_t newValue; - do { - newValue = __LDREXH(valuePtr) + delta; - } while (__STREXH(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - MBED_BARRIER(); - uint32_t newValue; - do { - newValue = __LDREXW(valuePtr) + delta; - } while (__STREXW(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - - -uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - MBED_BARRIER(); - uint8_t newValue; - do { - newValue = __LDREXB(valuePtr) - delta; - } while (__STREXB(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - MBED_BARRIER(); - uint16_t newValue; - do { - newValue = __LDREXH(valuePtr) - delta; - } while (__STREXH(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - MBED_BARRIER(); - uint32_t newValue; - do { - newValue = 
__LDREXW(valuePtr) - delta; - } while (__STREXW(newValue, valuePtr)); - MBED_BARRIER(); - return newValue; -} - -#else - -bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) -{ - core_util_critical_section_enter(); - uint8_t currentValue = flagPtr->_flag; - flagPtr->_flag = true; - core_util_critical_section_exit(); - return currentValue; -} - -bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue) -{ - bool success; - uint8_t currentValue; - core_util_critical_section_enter(); - currentValue = *ptr; - if (currentValue == *expectedCurrentValue) { - *ptr = desiredValue; - success = true; - } else { - *expectedCurrentValue = currentValue; - success = false; - } - core_util_critical_section_exit(); - return success; -} - -bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue) -{ - bool success; - uint16_t currentValue; - core_util_critical_section_enter(); - currentValue = *ptr; - if (currentValue == *expectedCurrentValue) { - *ptr = desiredValue; - success = true; - } else { - *expectedCurrentValue = currentValue; - success = false; - } - core_util_critical_section_exit(); - return success; -} - - -bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue) -{ - bool success; - uint32_t currentValue; - core_util_critical_section_enter(); - currentValue = *ptr; - if (currentValue == *expectedCurrentValue) { - *ptr = desiredValue; - success = true; - } else { - *expectedCurrentValue = currentValue; - success = false; - } - core_util_critical_section_exit(); - return success; -} - - -uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue) -{ - core_util_critical_section_enter(); - uint8_t currentValue = *ptr; - *ptr = desiredValue; - core_util_critical_section_exit(); - return currentValue; -} - -uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue) -{ - core_util_critical_section_enter(); - uint16_t currentValue = *ptr; - *ptr = desiredValue; - core_util_critical_section_exit(); - return currentValue; -} - -uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue) -{ - core_util_critical_section_enter(); - uint32_t currentValue = *ptr; - *ptr = desiredValue; - core_util_critical_section_exit(); - return currentValue; -} - - -uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - uint8_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr + delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - uint16_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr + delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - uint32_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr + delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - - -uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) -{ - uint8_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr - delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) -{ - uint16_t 
newValue; - core_util_critical_section_enter(); - newValue = *valuePtr - delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) -{ - uint32_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr - delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -#endif - -/* No architecture we support has LDREXD/STREXD, so must always disable IRQs for 64-bit operations */ -uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) -{ - core_util_critical_section_enter(); - uint64_t currentValue = *valuePtr; - core_util_critical_section_exit(); - return currentValue; -} - -void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) -{ - core_util_critical_section_enter(); - *valuePtr = desiredValue; - core_util_critical_section_exit(); -} - -uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) -{ - core_util_critical_section_enter(); - uint64_t currentValue = *valuePtr; - *valuePtr = desiredValue; - core_util_critical_section_exit(); - return currentValue; -} - -bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) -{ - bool success; - uint64_t currentValue; - core_util_critical_section_enter(); - currentValue = *ptr; - if (currentValue == *expectedCurrentValue) { - *ptr = desiredValue; - success = true; - } else { - *expectedCurrentValue = currentValue; - success = false; - } - core_util_critical_section_exit(); - return success; -} - -uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta) -{ - uint64_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr + delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) -{ - uint64_t newValue; - core_util_critical_section_enter(); - newValue = *valuePtr - delta; - *valuePtr = newValue; - core_util_critical_section_exit(); - return newValue; -} - -MBED_STATIC_ASSERT(sizeof(void *) == sizeof(uint32_t), "Alas, pointers must be 32-bit"); - -bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) -{ - return core_util_atomic_cas_u32( - (volatile uint32_t *)ptr, - (uint32_t *)expectedCurrentValue, - (uint32_t)desiredValue); -} - -void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue) -{ - return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue); -} - -void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta) -{ - return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); -} - -void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta) -{ - return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); -} - diff --git a/platform/mbed_critical.h b/platform/mbed_critical.h index 5de8de8..b6e68f5 100644 --- a/platform/mbed_critical.h +++ b/platform/mbed_critical.h @@ -20,9 +20,6 @@ #define __MBED_UTIL_CRITICAL_H__ #include -#include -#include -#include "mbed_toolchain.h" #ifdef __cplusplus extern "C" { @@ -92,658 +89,11 @@ /**@}*/ -/** - * \defgroup platform_atomic atomic functions - * - * Atomic functions function analogously to C11 and C++11 - loads have - * acquire semantics, stores have release semantics, and 
atomic operations - * are sequentially consistent. Atomicity is enforced both between threads and - * interrupt handlers. - * - * @{ - */ - -/** - * A lock-free, primitive atomic flag. - * - * Emulate C11's atomic_flag. The flag is initially in an indeterminate state - * unless explicitly initialized with CORE_UTIL_ATOMIC_FLAG_INIT. - */ -typedef struct core_util_atomic_flag { - uint8_t _flag; -} core_util_atomic_flag; - -/** - * Initializer for a core_util_atomic_flag. - * - * Example: - * ~~~ - * core_util_atomic_flag in_progress = CORE_UTIL_ATOMIC_FLAG_INIT; - * ~~~ - */ -#define CORE_UTIL_ATOMIC_FLAG_INIT { 0 } - -/** - * Atomic test and set. - * - * Atomically tests then sets the flag to true, returning the previous value. - * - * @param flagPtr Target flag being tested and set. - * @return The previous value. - */ -bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr); - -/** - * Atomic clear. - * - * @param flagPtr Target flag being cleared. - */ -MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr) -{ - MBED_BARRIER(); - flagPtr->_flag = false; - MBED_BARRIER(); -} - -/** - * Atomic compare and set. It compares the contents of a memory location to a - * given value and, only if they are the same, modifies the contents of that - * memory location to a given new value. This is done as a single atomic - * operation. The atomicity guarantees that the new value is calculated based on - * up-to-date information; if the value had been updated by another thread in - * the meantime, the write would fail due to a mismatched expectedCurrentValue. - * - * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect - * you to the article on compare-and swap]. - * - * @param ptr The target memory location. - * @param[in,out] expectedCurrentValue A pointer to some location holding the - * expected current value of the data being set atomically. - * The computed 'desiredValue' should be a function of this current value. - * @note: This is an in-out parameter. In the - * failure case of atomic_cas (where the - * destination isn't set), the pointee of expectedCurrentValue is - * updated with the current value. - * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'. - * - * @return true if the memory location was atomically - * updated with the desired value (after verifying - * that it contained the expectedCurrentValue), - * false otherwise. In the failure case, - * exepctedCurrentValue is updated with the new - * value of the target memory location. - * - * pseudocode: - * function cas(p : pointer to int, old : pointer to int, new : int) returns bool { - * if *p != *old { - * *old = *p - * return false - * } - * *p = new - * return true - * } - * - * @note: In the failure case (where the destination isn't set), the value - * pointed to by expectedCurrentValue is instead updated with the current value. - * This property helps writing concise code for the following incr: - * - * function incr(p : pointer to int, a : int) returns int { - * done = false - * value = *p // This fetch operation need not be atomic. - * while not done { - * done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success - * } - * return value + a - * } - * - * @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it - * always succeeds if the current value is expected, as per the pseudocode - * above; it will not spuriously fail as "atomic_compare_exchange_weak" may. 
- */ -bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); - -/** \copydoc core_util_atomic_cas_u8 */ -bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); - -/** \copydoc core_util_atomic_cas_u8 */ -bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); - -/** \copydoc core_util_atomic_cas_u8 */ -bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); - -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE int8_t core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue) -{ - return (int8_t)core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, (uint8_t)desiredValue); -} - -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE int16_t core_util_atomic_cas_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue) -{ - return (int16_t)core_util_atomic_cas_u16((volatile uint16_t *)ptr, (uint16_t *)expectedCurrentValue, (uint16_t)desiredValue); -} -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE int32_t core_util_atomic_cas_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue) -{ - return (int32_t)core_util_atomic_cas_u32((volatile uint32_t *)ptr, (uint32_t *)expectedCurrentValue, (uint32_t)desiredValue); -} - -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE int64_t core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue) -{ - return (int64_t)core_util_atomic_cas_u64((volatile uint64_t *)ptr, (uint64_t *)expectedCurrentValue, (uint64_t)desiredValue); -} - -/** \copydoc core_util_atomic_cas_u8 */ -MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue) -{ - return (bool)core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue); -} - -/** \copydoc core_util_atomic_cas_u8 */ -bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr) -{ - uint8_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr) -{ - uint16_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr) -{ - uint32_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr); - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr) -{ - int8_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. 
- */ -MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr) -{ - int16_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE int32_t core_util_atomic_load_s32(const volatile int32_t *valuePtr) -{ - int32_t value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr) -{ - return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr); -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr) -{ - bool value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic load. - * @param valuePtr Target memory location. - * @return The loaded value. - */ -MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr) -{ - void *value = *valuePtr; - MBED_BARRIER(); - return value; -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_s8(volatile int8_t *valuePtr, int8_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue) -{ - core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. 
- */ -MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic store. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - */ -MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue) -{ - MBED_BARRIER(); - *valuePtr = desiredValue; - MBED_BARRIER(); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue); - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue); - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue); - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue) -{ - return (int8_t)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, (uint8_t)desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue) -{ - return (int16_t)core_util_atomic_exchange_u16((volatile uint16_t *)valuePtr, (uint16_t)desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue) -{ - return (int32_t)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue) -{ - return (int64_t)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue) -{ - return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue); -} - -/** - * Atomic exchange. - * @param valuePtr Target memory location. - * @param desiredValue The value to store. - * @return The previous value. - */ -void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); - -/** - * Atomic increment. 
- * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta); - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta) -{ - return (int8_t)core_util_atomic_incr_u8((volatile uint8_t *)valuePtr, (uint8_t)delta); -} - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta) -{ - return (int16_t)core_util_atomic_incr_u16((volatile uint16_t *)valuePtr, (uint16_t)delta); -} - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta) -{ - return (int32_t)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); -} - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented. - * @return The new incremented value. - */ -MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta) -{ - return (int64_t)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); -} - -/** - * Atomic increment. - * @param valuePtr Target memory location being incremented. - * @param delta The amount being incremented in bytes. - * @return The new incremented value. - * - * @note The type of the pointer argument is not taken into account - * and the pointer is incremented by bytes. - */ -void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. 
- * @param delta The amount being decremented. - * @return The new decremented value. - */ -uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta); - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta) -{ - return (int8_t)core_util_atomic_decr_u8((volatile uint8_t *)valuePtr, (uint8_t)delta); -} - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta) -{ - return (int16_t)core_util_atomic_decr_u16((volatile uint16_t *)valuePtr, (uint16_t)delta); -} - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta) -{ - return (int32_t)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); -} - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented. - * @return The new decremented value. - */ -MBED_FORCEINLINE int64_t core_util_atomic_decr_s64(volatile int64_t *valuePtr, int64_t delta) -{ - return (int64_t)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); -} - -/** - * Atomic decrement. - * @param valuePtr Target memory location being decremented. - * @param delta The amount being decremented in bytes. - * @return The new decremented value. 
- * - * @note The type of the pointer argument is not taken into account - * and the pointer is decremented by bytes - */ -void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta); +/**@}*/ #ifdef __cplusplus } // extern "C" #endif -/**@}*/ - -/**@}*/ #endif // __MBED_UTIL_CRITICAL_H__ diff --git a/platform/mbed_error.c b/platform/mbed_error.c index ee5e93e..d30da62 100644 --- a/platform/mbed_error.c +++ b/platform/mbed_error.c @@ -20,6 +20,7 @@ #include "device.h" #include "platform/mbed_crash_data_offsets.h" #include "platform/mbed_retarget.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_critical.h" #include "platform/mbed_error.h" #include "platform/mbed_error_hist.h" diff --git a/platform/mbed_retarget.cpp b/platform/mbed_retarget.cpp index 5a6a041..0ea946b 100644 --- a/platform/mbed_retarget.cpp +++ b/platform/mbed_retarget.cpp @@ -27,6 +27,7 @@ #include "platform/PlatformMutex.h" #include "platform/mbed_error.h" #include "platform/mbed_stats.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_critical.h" #include "platform/mbed_poll.h" #include "platform/PlatformMutex.h" diff --git a/platform/mbed_sleep_manager.c b/platform/mbed_sleep_manager.c index d9ae6a8..a0a5c3d 100644 --- a/platform/mbed_sleep_manager.c +++ b/platform/mbed_sleep_manager.c @@ -17,6 +17,7 @@ #include "mbed_power_mgmt.h" #include "mbed_interface.h" +#include "mbed_atomic.h" #include "mbed_critical.h" #include "mbed_assert.h" #include "mbed_error.h" diff --git a/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_NRF52/serial_api.c b/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_NRF52/serial_api.c index 7849ecf..83d4232 100644 --- a/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_NRF52/serial_api.c +++ b/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_NRF52/serial_api.c @@ -50,6 +50,7 @@ #include "nrf_drv_gpiote.h" #include "PeripheralPins.h" +#include "platform/mbed_atomic.h" #include "platform/mbed_critical.h" #if UART0_ENABLED == 0 diff --git a/targets/TARGET_NUVOTON/TARGET_M2351/crypto/crypto-misc.cpp b/targets/TARGET_NUVOTON/TARGET_M2351/crypto/crypto-misc.cpp index eed2603..e395dc6 100644 --- a/targets/TARGET_NUVOTON/TARGET_M2351/crypto/crypto-misc.cpp +++ b/targets/TARGET_NUVOTON/TARGET_M2351/crypto/crypto-misc.cpp @@ -17,6 +17,7 @@ #include "cmsis.h" #include "mbed_assert.h" +#include "mbed_atomic.h" #include "mbed_critical.h" #include "mbed_error.h" #include diff --git a/targets/TARGET_NUVOTON/TARGET_M480/crypto/crypto-misc.cpp b/targets/TARGET_NUVOTON/TARGET_M480/crypto/crypto-misc.cpp index 18f517a..f0e7a80 100644 --- a/targets/TARGET_NUVOTON/TARGET_M480/crypto/crypto-misc.cpp +++ b/targets/TARGET_NUVOTON/TARGET_M480/crypto/crypto-misc.cpp @@ -17,6 +17,7 @@ #include "cmsis.h" #include "mbed_assert.h" +#include "mbed_atomic.h" #include "mbed_critical.h" #include "mbed_error.h" #include diff --git a/targets/TARGET_NUVOTON/TARGET_NUC472/crypto/crypto-misc.cpp b/targets/TARGET_NUVOTON/TARGET_NUC472/crypto/crypto-misc.cpp index e4fd914..9a74a3c 100644 --- a/targets/TARGET_NUVOTON/TARGET_NUC472/crypto/crypto-misc.cpp +++ b/targets/TARGET_NUVOTON/TARGET_NUC472/crypto/crypto-misc.cpp @@ -17,6 +17,7 @@ #include "cmsis.h" #include "mbed_assert.h" +#include "mbed_atomic.h" #include "mbed_critical.h" #include "mbed_error.h" #include diff --git a/targets/TARGET_NXP/TARGET_MCUXpresso_MCUS/TARGET_LPC55S69/flash_api.c b/targets/TARGET_NXP/TARGET_MCUXpresso_MCUS/TARGET_LPC55S69/flash_api.c index b3dacf0..b683063 100644 --- a/targets/TARGET_NXP/TARGET_MCUXpresso_MCUS/TARGET_LPC55S69/flash_api.c +++ 
b/targets/TARGET_NXP/TARGET_MCUXpresso_MCUS/TARGET_LPC55S69/flash_api.c @@ -15,6 +15,7 @@ */ #include "flash_api.h" +#include "mbed_toolchain.h" #include "mbed_critical.h" #if DEVICE_FLASH diff --git a/targets/TARGET_STM/trng_api.c b/targets/TARGET_STM/trng_api.c index 4f68b60..205598a 100644 --- a/targets/TARGET_STM/trng_api.c +++ b/targets/TARGET_STM/trng_api.c @@ -24,7 +24,7 @@ #include "cmsis.h" #include "trng_api.h" #include "mbed_error.h" -#include "mbed_critical.h" +#include "mbed_atomic.h" #if defined (TARGET_STM32WB) /* Family specific include for WB with HW semaphores */ #include "hw.h" diff --git a/usb/device/USBDevice/USBDevice.h b/usb/device/USBDevice/USBDevice.h index 4409bb3..01e2514 100644 --- a/usb/device/USBDevice/USBDevice.h +++ b/usb/device/USBDevice/USBDevice.h @@ -18,6 +18,7 @@ #ifndef USBDEVICE_H #define USBDEVICE_H +#include #include "USBDevice_Types.h" #include "USBPhy.h" #include "mbed_critical.h"
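
Usage note: the documentation removed from mbed_critical.h above describes a compare-and-swap retry idiom; the same idiom applies unchanged against the relocated mbed_atomic.h API. The following is a minimal illustrative sketch, not part of the patch: the counter and helper names are hypothetical, and only functions declared in this patch (core_util_atomic_load_u32, core_util_atomic_cas_u32) are assumed.

#include "platform/mbed_atomic.h"

static volatile uint32_t event_count = 0;   /* hypothetical shared counter */

/* Add 'delta' and return the new value. On failure, core_util_atomic_cas_u32
 * writes the observed value back into 'expected', so each retry works on
 * up-to-date data, exactly as the (removed) doxygen pseudocode describes. */
static uint32_t add_to_count(uint32_t delta)
{
    uint32_t expected = core_util_atomic_load_u32(&event_count);
    while (!core_util_atomic_cas_u32(&event_count, &expected, expected + delta)) {
        /* 'expected' now holds the latest counter value; try again */
    }
    return expected + delta;
}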
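A second sketch covers the C++ template overloads added in mbed_atomic.h: as the new header's comment warns, overload resolution can be ambiguous with plain integer literals (or NULL for pointers), so the argument may need an explicit cast. The flag variable below is hypothetical.

#include "platform/mbed_atomic.h"

static volatile uint8_t status_flags = 0;   // hypothetical flag byte

void set_busy_flag()
{
    // Cast the literal so the deduced template type matches uint8_t,
    // avoiding the overload ambiguity noted in the header comment.
    core_util_atomic_fetch_or(&status_flags, static_cast<uint8_t>(0x01));
}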