From 331cd29bf016c48dc29e6f2de39eaa522fa8f464 Mon Sep 17 00:00:00 2001
From: Johnny Shaw
Date: Thu, 10 Oct 2024 03:45:31 -0600
Subject: [PATCH] initial clang-tidy linting

---
 .clang-format                          |   61 +-
 .clang-tidy                            |    3 +
 .github/workflows/lint.yaml            |    2 +
 .pre-commit-config.yaml                |   10 +
 BUILD.bazel                            |    6 +
 MODULE.bazel                           |    2 +-
 radiant/.clang-tidy                    |    8 +
 radiant/Algorithm.h                    |    3 -
 radiant/Atomic.h                       |    3 +
 radiant/EmptyOptimizedPair.h           |    1 -
 radiant/Integer.h                      |    4 +-
 radiant/Iterator.h                     |    4 +-
 radiant/Res.h                          |    2 +
 radiant/Result.h                       |    4 +
 radiant/ScopeExit.h                    |    3 +-
 radiant/SharedPtr.h                    |    8 +-
 radiant/Span.h                         |    6 +-
 radiant/TotallyRad.h                   |   15 +-
 radiant/TypeTraits.h                   |    2 +
 radiant/TypeWrapper.h                  |    1 +
 radiant/UniqueResource.h               |    3 +-
 radiant/Utility.h                      |    2 +
 radiant/Vector.h                       |    9 +-
 radiant/detail/AtomicIntrinsics.h      | 1455 +++++++++++++++++++++++-
 radiant/detail/IntrinsicTraits.h       |   35 +-
 radiant/detail/Meta.h                  |    5 +-
 radiant/detail/StdTypeTraits.h         |   13 +-
 radiant/detail/VectorOperations.h      |    8 +-
 radiant/detail/gcc/AtomicIntrinsics.h  |  191 ----
 radiant/detail/msvc/AtomicIntrinsics.h | 1271 ---------------------
 test/TestThrow.h                       |    2 +-
 test/test_EmptyOptimizedPair.cpp       |    6 +-
 test/test_Integer.cpp                  |    3 +-
 test/test_Iterator.cpp                 |    4 +-
 test/test_Locks.cpp                    |    4 +-
 test/test_Result.cpp                   |    8 +-
 test/test_ScopeExit.cpp                |    4 +-
 test/test_SharedPtr.cpp                |    3 +-
 test/test_Span.cpp                     |    4 +-
 test/test_StdTypeTraits.cpp            |    6 +-
 test/test_TypeWrapper.cpp              |    5 +-
 test/test_UniqueResource.cpp           |    4 +-
 test/test_Utility.cpp                  |    3 +-
 test/test_Vector.cpp                   |    1 -
 tools/rad/pyproject.toml               |    1 +
 tools/rad/rad/bazel.py                 |   10 +-
 tools/rad/rad/clang_tidy.py            |   87 ++
 tools/rad/rad/cli.py                   |   12 +-
 48 files changed, 1726 insertions(+), 1581 deletions(-)
 create mode 100644 .clang-tidy
 create mode 100644 radiant/.clang-tidy
 delete mode 100644 radiant/detail/gcc/AtomicIntrinsics.h
 delete mode 100644 radiant/detail/msvc/AtomicIntrinsics.h
 create mode 100644 tools/rad/rad/clang_tidy.py

diff --git a/.clang-format b/.clang-format
index 5a3bff4..6343dca 100644
--- a/.clang-format
+++ b/.clang-format
@@ -6,33 +6,48 @@ UseTab: Never
 ColumnLimit: 80
 ReflowComments: true
 AccessModifierOffset: -4
-SortIncludes: Never
 UseCRLF: false
 BreakBeforeBraces: Custom
 BraceWrapping:
-  AfterCaseLabel: true
-  AfterClass: true
-  AfterEnum: true
-  AfterFunction: true
-  AfterNamespace: true
-  AfterStruct: true
-  AfterUnion: true
-  AfterExternBlock: true
-  BeforeCatch: true
-  BeforeElse: true
-  BeforeLambdaBody: true
-  BeforeWhile: true
-  IndentBraces: false
-  SplitEmptyFunction: true
-  SplitEmptyRecord: true
-  SplitEmptyNamespace: true
+    AfterCaseLabel: true
+    AfterClass: true
+    AfterEnum: true
+    AfterFunction: true
+    AfterNamespace: true
+    AfterStruct: true
+    AfterUnion: true
+    AfterExternBlock: true
+    BeforeCatch: true
+    BeforeElse: true
+    BeforeLambdaBody: true
+    BeforeWhile: true
+    IndentBraces: false
+    SplitEmptyFunction: true
+    SplitEmptyRecord: true
+    SplitEmptyNamespace: true
 BreakBeforeBinaryOperators: None
 BreakBeforeTernaryOperators: true
 BreakInheritanceList: AfterComma
 SeparateDefinitionBlocks: Always
 AlwaysBreakTemplateDeclarations: Yes
+SortIncludes: true
+IncludeBlocks: Regroup
+IncludeCategories:
+  - Regex: '^"radiant/TotallyRad.h"'
+    Priority: 0
+    SortPriority: 0
+  - Regex: '^"radiant/.*\.h"'
+    Priority: 0
+    SortPriority: 1
+  - Regex: '^".*\.h"$'
+    Priority: 1
+    SortPriority: 2
+  - Regex: '^<.*>$'
+    Priority: 1
+    SortPriority: 3
+
 BinPackArguments: false
 BinPackParameters: false
 AllowAllArgumentsOnNextLine: false
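With IncludeBlocks: Regroup, the categories above let clang-format rewrite every include list itself: radiant/TotallyRad.h always sorts first within the radiant block, the remaining radiant/ headers follow it, and other quoted headers sort ahead of angle-bracket includes in a second block. A sketch of the resulting layout, assuming a hypothetical header (the non-radiant names are illustrative only, not files in this patch):

    // Hypothetical include list after formatting under the rules above:
    #include "radiant/TotallyRad.h"  // Priority 0, SortPriority 0: always first
    #include "radiant/TypeTraits.h"  // Priority 0, SortPriority 1: other radiant/ headers
    #include "radiant/Utility.h"

    #include "vendor/Helper.h"       // Priority 1, SortPriority 2: other quoted headers
    #include <stddef.h>              // Priority 1, SortPriority 3: angle-bracket headers last
    #include <stdint.h>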
@@ -41,13 +56,13 @@ ReferenceAlignment: Left
 AlignAfterOpenBracket: Align
 AlignArrayOfStructures: Right
 AlignConsecutiveBitFields:
-  Enabled: true
-  AcrossEmptyLines: false
-  AcrossComments: true
+    Enabled: true
+    AcrossEmptyLines: false
+    AcrossComments: true
 AlignConsecutiveMacros:
-  Enabled: true
-  AcrossEmptyLines: false
-  AcrossComments: true
+    Enabled: true
+    AcrossEmptyLines: false
+    AcrossComments: true
 AlignEscapedNewlines: Right
 AlignOperands: Align
 IndentCaseBlocks: false
diff --git a/.clang-tidy b/.clang-tidy
new file mode 100644
index 0000000..f71fd0a
--- /dev/null
+++ b/.clang-tidy
@@ -0,0 +1,3 @@
+---
+Checks: '-*'
+WarningsAsErrors: '*'
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index d841c7f..7328cdf 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -25,5 +25,7 @@ jobs:
           cache-dependency-path: ./tools/rad/setup.py
       - name: Install Rad Tool
         run: pip install -e ./tools/rad
+      - name: Initialize
+        run: rad init
       - name: Run Linter
         run: rad lint --all-files --skip no-commit-to-branch
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 55a5c32..d467de9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,6 +13,7 @@ repos:
     rev: 9a9bbc00895bbdb7670231c2565d4f1309c42905 # v1.3.5
     hooks:
       - id: clang-format
+        args: [-i]
   - repo: https://github.com/psf/black
     rev: ec91a2be3c44d88e1a3960a4937ad6ed3b63464e # 23.12.1
     hooks:
@@ -29,3 +30,12 @@ repos:
         language: system
         types: [python]
        require_serial: true
+  - repo: local
+    hooks:
+      - id: clang-tidy
+        name: clang-tidy
+        entry: rad
+        args: [lint, --clang-tidy]
+        language: python
+        files: \.(c|cc|cpp|cxx|h|hh|hpp|hxx)$
+        require_serial: true
diff --git a/BUILD.bazel b/BUILD.bazel
index e1b96dd..be5d320 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1,3 +1,5 @@
+load("@hedron_compile_commands//:refresh_compile_commands.bzl", "refresh_compile_commands")
+
 package(default_visibility = ["//visibility:public"])

 exports_files([
@@ -5,6 +7,10 @@ exports_files([
     "LICENSE",
 ])

+refresh_compile_commands(
+    name = "refresh_compile_commands",
+)
+
 config_setting(
     name = "msvc",
     flag_values = {
diff --git a/MODULE.bazel b/MODULE.bazel
index 5e05747..cdb7276 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -25,7 +25,7 @@ bazel_dep(name = "hedron_compile_commands", dev_dependency = True)
 git_override(
     module_name = "hedron_compile_commands",
     remote = "https://github.com/hedronvision/bazel-compile-commands-extractor.git",
-    commit = "a14ad3a64e7bf398ab48105aaa0348e032ac87f8",
+    commit = "4f28899228fb3ad0126897876f147ca15026151e",
 )

 # Windows - allows building with the EWDK, must set EWDKDIR environment variable
diff --git a/radiant/.clang-tidy b/radiant/.clang-tidy
new file mode 100644
index 0000000..de0f9f0
--- /dev/null
+++ b/radiant/.clang-tidy
@@ -0,0 +1,8 @@
+---
+InheritParentConfig: true
+Checks:
+  - misc-include-cleaner
+  - llvm-include-order
+  - performance-*
+  - portability-*
+  - -performance-enum-size
diff --git a/radiant/Algorithm.h b/radiant/Algorithm.h
index 7f2ec40..0bf4ad3 100644
--- a/radiant/Algorithm.h
+++ b/radiant/Algorithm.h
@@ -14,9 +14,6 @@

 #pragma once

-#include "radiant/TotallyRad.h"
-#include "radiant/TypeTraits.h"
-#include "radiant/Utility.h"
 #include "radiant/TotallyRad.h"

 namespace rad
diff --git a/radiant/Atomic.h b/radiant/Atomic.h
index 0b578fe..2b99210 100644
--- a/radiant/Atomic.h
+++ b/radiant/Atomic.h
@@ -15,8 +15,11 @@
 #pragma once

 #include "radiant/TotallyRad.h"
+#include "radiant/TypeTraits.h"
 #include "radiant/detail/AtomicIntrinsics.h"

+#include
+
 namespace rad
 {
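The root .clang-tidy above disables every check and promotes any warning to an error; radiant/.clang-tidy then opts the library headers into the include and performance checks via InheritParentConfig. Where a header deliberately re-exports an include that misc-include-cleaner would flag, the patch suppresses the finding inline. A sketch of the suppression pattern used throughout the headers below (the specific headers here are illustrative):

    // Keep a deliberate re-export that misc-include-cleaner would otherwise flag:
    #include "radiant/Utility.h" // NOLINT(misc-include-cleaner)

    // Or fence off a whole region, e.g. platform headers included for side effects:
    // NOLINTBEGIN(misc-include-cleaner)
    #include <winnt.h>
    // NOLINTEND(misc-include-cleaner)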
diff --git a/radiant/EmptyOptimizedPair.h b/radiant/EmptyOptimizedPair.h
index 4d464af..5f407d3 100644
--- a/radiant/EmptyOptimizedPair.h
+++ b/radiant/EmptyOptimizedPair.h
@@ -14,7 +14,6 @@

 #pragma once

-#include "radiant/TotallyRad.h"
 #include "radiant/Utility.h"

 namespace rad
diff --git a/radiant/Integer.h b/radiant/Integer.h
index 4692e65..f4bd718 100644
--- a/radiant/Integer.h
+++ b/radiant/Integer.h
@@ -13,8 +13,10 @@
 // limitations under the License.

 #include "radiant/TotallyRad.h"
-#include "radiant/TypeTraits.h"
 #include "radiant/Res.h"
+#include "radiant/TypeTraits.h"
+
+#include

 namespace rad
diff --git a/radiant/Iterator.h b/radiant/Iterator.h
index 9f35987..56cc429 100644
--- a/radiant/Iterator.h
+++ b/radiant/Iterator.h
@@ -14,9 +14,11 @@

 #pragma once

-#include "radiant/TotallyRad.h"
+#include "radiant/TypeTraits.h"
 #include "radiant/Utility.h"

+#include
+
 namespace rad
diff --git a/radiant/Res.h b/radiant/Res.h
index d82b64a..7234702 100644
--- a/radiant/Res.h
+++ b/radiant/Res.h
@@ -17,6 +17,8 @@
 #include "radiant/TotallyRad.h"
 #include "radiant/Result.h"

+#include
+
 namespace rad
diff --git a/radiant/Result.h b/radiant/Result.h
index b1acd9c..5bf59ee 100644
--- a/radiant/Result.h
+++ b/radiant/Result.h
@@ -15,9 +15,13 @@
 #pragma once

 #include "radiant/TotallyRad.h"
+#include "radiant/TypeTraits.h"
 #include "radiant/TypeWrapper.h"

+#include
+#if RAD_ENABLE_STD
 #include
+#endif

 namespace rad
diff --git a/radiant/ScopeExit.h b/radiant/ScopeExit.h
index 969b0ad..0ad6113 100644
--- a/radiant/ScopeExit.h
+++ b/radiant/ScopeExit.h
@@ -15,7 +15,8 @@
 #pragma once

 #include "radiant/TotallyRad.h"
-#include "radiant/Utility.h"
+#include "radiant/TypeTraits.h"
+#include "radiant/Utility.h" // NOLINT(misc-include-cleaner)

 namespace rad
diff --git a/radiant/SharedPtr.h b/radiant/SharedPtr.h
index d869d2b..4edf3ee 100644
--- a/radiant/SharedPtr.h
+++ b/radiant/SharedPtr.h
@@ -15,10 +15,14 @@
 #pragma once

 #include "radiant/TotallyRad.h"
-#include "radiant/Memory.h"
 #include "radiant/Atomic.h"
-#include "radiant/Locks.h"
 #include "radiant/EmptyOptimizedPair.h"
+#include "radiant/Locks.h"  // NOLINT(misc-include-cleaner)
+#include "radiant/Memory.h" // NOLINT(misc-include-cleaner)
+#include "radiant/TypeTraits.h"
+#include "radiant/detail/AtomicIntrinsics.h"
+
+#include

 namespace rad
diff --git a/radiant/Span.h b/radiant/Span.h
index f4c5982..b2064c7 100644
--- a/radiant/Span.h
+++ b/radiant/Span.h
@@ -15,8 +15,12 @@
 #pragma once

 #include "radiant/TotallyRad.h"
-#include "radiant/Iterator.h"
 #include "radiant/Byte.h"
+#include "radiant/Iterator.h"
+#include "radiant/TypeTraits.h"
+
+#include
+#include

 namespace rad
diff --git a/radiant/TotallyRad.h b/radiant/TotallyRad.h
index 3dc8b7b..2767262 100644
--- a/radiant/TotallyRad.h
+++ b/radiant/TotallyRad.h
@@ -154,10 +154,19 @@ static_assert(!(RAD_WINDOWS && RAD_MACOS), "env invalid os");

 #define RAD_UNUSED(x) ((void)x)

-#include
+#ifdef RAD_MSC_VERSION
+// MSVC does not seem to have something like __has_builtin, but
+// also seems to define a superset of what the other compilers do
+// in terms of intrinsics
+#define RAD_HAS_BUILTIN(v) 1
+#else
+#define RAD_HAS_BUILTIN(v) __has_builtin(v)
+#endif

-#if RAD_WINDOWS && RAD_KERNEL_MODE
-#include
+#if RAD_WINDOWS
+#define RAD_YIELD_PROCESSOR() YieldProcessor()
+#else
+#define RAD_YIELD_PROCESSOR() sched_yield()
 #endif

 #define RAD_CONCAT_INNER(x, y) x##y
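RAD_HAS_BUILTIN replaces the old trick of redefining __has_builtin itself (MSVC lacks __has_builtin but implements a superset of the type-trait intrinsics, so the macro simply expands to 1 there); radiant/detail/IntrinsicTraits.h below is its consumer. RAD_YIELD_PROCESSOR, relocated here from the atomic intrinsics header, maps to YieldProcessor() on Windows and sched_yield() elsewhere. A minimal spin-wait sketch using it (the function and flag are hypothetical, not part of this patch):

    // Spin until another thread publishes a nonzero value; yield each
    // iteration so the waiter does not monopolize the processor.
    inline void SpinUntilNonZero(const volatile long& flag) noexcept
    {
        while (flag == 0)
        {
            RAD_YIELD_PROCESSOR(); // YieldProcessor() / sched_yield()
        }
    }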
diff --git a/radiant/TypeTraits.h b/radiant/TypeTraits.h
index 4c0a42e..e318c0c 100644
--- a/radiant/TypeTraits.h
+++ b/radiant/TypeTraits.h
@@ -17,6 +17,8 @@
 #include "radiant/TotallyRad.h"
 #include "radiant/detail/StdTypeTraits.h"

+#include
+
 namespace rad
 {
diff --git a/radiant/TypeWrapper.h b/radiant/TypeWrapper.h
index e5d6e02..2c4acce 100644
--- a/radiant/TypeWrapper.h
+++ b/radiant/TypeWrapper.h
@@ -15,6 +15,7 @@
 #pragma once

 #include "radiant/TotallyRad.h"
+#include "radiant/TypeTraits.h"
 #include "radiant/Utility.h"

 #if RAD_ENABLE_STD
diff --git a/radiant/UniqueResource.h b/radiant/UniqueResource.h
index 07eb32d..a8e9a94 100644
--- a/radiant/UniqueResource.h
+++ b/radiant/UniqueResource.h
@@ -15,7 +15,8 @@
 #pragma once

 #include "radiant/TotallyRad.h"
-#include "radiant/Utility.h"
+#include "radiant/TypeTraits.h"
+#include "radiant/Utility.h" // NOLINT(misc-include-cleaner)

 namespace rad
diff --git a/radiant/Utility.h b/radiant/Utility.h
index 6b50344..23152b3 100644
--- a/radiant/Utility.h
+++ b/radiant/Utility.h
@@ -17,6 +17,8 @@
 #include "radiant/TotallyRad.h"
 #include "radiant/TypeTraits.h"

+#include
+
 namespace rad
diff --git a/radiant/Vector.h b/radiant/Vector.h
index 23cb3fd..6d7d15f 100644
--- a/radiant/Vector.h
+++ b/radiant/Vector.h
@@ -15,10 +15,17 @@
 #pragma once

 #include "radiant/TotallyRad.h"
-#include "radiant/Memory.h"
 #include "radiant/EmptyOptimizedPair.h"
+#include "radiant/Memory.h"
+#include "radiant/Res.h"
+#include "radiant/Span.h"
 #include "radiant/detail/VectorOperations.h"

+#include
+#if RAD_ENABLE_STD
+#include
+#endif
+
 namespace rad
diff --git a/radiant/detail/AtomicIntrinsics.h b/radiant/detail/AtomicIntrinsics.h
index 848f7ef..72e15b6 100644
--- a/radiant/detail/AtomicIntrinsics.h
+++ b/radiant/detail/AtomicIntrinsics.h
@@ -15,24 +15,21 @@
 #pragma once

 #include "radiant/TotallyRad.h"
-#include "radiant/Utility.h"
+#include "radiant/TypeTraits.h"
+#include "radiant/Utility.h" // NOLINT(misc-include-cleaner)
+
+#include
+#include

 #if RAD_WINDOWS && RAD_KERNEL_MODE
 #include
 #endif
-
 #if RAD_WINDOWS && RAD_USER_MODE
 #define WIN32_LEAN_AND_MEAN
 #define WIN32_NO_STATUS
-#include
+#include // NOLINT(misc-include-cleaner)
 #undef WIN32_NO_STATUS
-#include
-#endif
-
-#if RAD_WINDOWS
-#define RAD_YIELD_PROCESSOR() YieldProcessor()
-#else
-#define RAD_YIELD_PROCESSOR() sched_yield()
+#include // NOLINT(misc-include-cleaner)
 #endif

 namespace rad
@@ -232,9 +229,1441 @@ R ValAs(T val) noexcept
 }

 #ifdef RAD_MSC_VERSION
-#include "radiant/detail/msvc/AtomicIntrinsics.h"
-#elif defined(RAD_GCC_VERSION) || defined(RAD_CLANG_VERSION)
-#include "radiant/detail/gcc/AtomicIntrinsics.h"
+// NOLINTBEGIN(misc-include-cleaner)
+#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
+#define RAD_MEM_BARRIER __dmb(0xb); // _ARM_BARRIER_ISH / _ARM64_BARRIER_ISH
+#define RAD_INTRIN_RELAXED(Intrin) RAD_CONCAT(Intrin, _nf)
+#define RAD_INTRIN_ACQUIRE(Intrin) RAD_CONCAT(Intrin, _acq)
+#define RAD_INTRIN_RELEASE(Intrin) RAD_CONCAT(Intrin, _rel)
+#elif defined(_M_IX86) || defined(_M_AMD64)
+#define RAD_MEM_BARRIER \
+    _Pragma("warning(push)") _Pragma("warning(disable : 4996)") \
+        _ReadWriteBarrier() _Pragma("warning(pop)")
+#define RAD_INTRIN_RELAXED(Intrin) Intrin
+#define RAD_INTRIN_ACQUIRE(Intrin) Intrin
+#define RAD_INTRIN_RELEASE(Intrin) Intrin
+#endif
+
+// clang-format off
+RAD_INLINE_VAR constexpr MemoryOrder combinedOrders[6][6] =
+{
+    {MemoryOrder::Relaxed, MemoryOrder::Consume, MemoryOrder::Acquire, MemoryOrder::Release, MemoryOrder::AcqRel, MemoryOrder::SeqCst},
+    {MemoryOrder::Consume, MemoryOrder::Consume, MemoryOrder::Acquire, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::SeqCst},
+
{MemoryOrder::Acquire, MemoryOrder::Acquire, MemoryOrder::Acquire, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::SeqCst}, + {MemoryOrder::Release, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::Release, MemoryOrder::AcqRel, MemoryOrder::SeqCst}, + {MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::SeqCst}, + {MemoryOrder::SeqCst, MemoryOrder::SeqCst, MemoryOrder::SeqCst, MemoryOrder::SeqCst, MemoryOrder::SeqCst, MemoryOrder::SeqCst} +}; +// clang-format on + +constexpr inline MemoryOrder CombineMemoryOrders(MemoryOrder success, + MemoryOrder fail) noexcept +{ + return combinedOrders[static_cast(success)][static_cast(fail)]; +} + +template +constexpr inline T Negate(const T val) noexcept +{ + return static_cast(0U - static_cast>(val)); +} + +template +struct SelectIntrinsic +{ + RAD_S_ASSERTMSG( + Size == 0, + "rad::detail::atomic::SelectIntrinsic not supported for this type"); +}; + +template +struct SelectIntrinsic +{ + RAD_S_ASSERT(sizeof(T) == 1); + using Type = char; + + static inline T Load(const volatile T& storage, RelaxedTag) noexcept + { + return static_cast(__iso_volatile_load8(AddrAs(storage))); + } + + template + static inline T Load(const volatile T& storage, OrderTag) noexcept + { + CheckLoadMemoryOrder(); + T ret = static_cast(__iso_volatile_load8(AddrAs(storage))); + RAD_MEM_BARRIER; + return ret; + } + + static inline void Store(volatile T& storage, T val, RelaxedTag) noexcept + { + __iso_volatile_store8(AddrAs(storage), ValAs(val)); + } + + template + static inline void Store(volatile T& storage, + T val, + OrderTag) noexcept + { + CheckStoreMemoryOrder(); + RAD_MEM_BARRIER; + __iso_volatile_store8(AddrAs(storage), ValAs(val)); + } + + static inline T Exchange(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedExchange8)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedExchange8)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedExchange8)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedExchange8(AddrAs(storage), ValAs(val))); + } + + static inline bool CasRet(T& expected, Type comparand, Type old) noexcept + { + if (old != comparand) + { + expected = static_cast(old); + return false; + } + return true; + } + + template + static inline bool CompareExchangeWeak(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + // MSVC does not provide a weak CAS intrinsic for any platform + Ts success; + Tf fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag, + ReleaseTag) noexcept + { + ReleaseTag success; + RelaxedTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong( + volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept + { + AcqRelTag success; + AcquireTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + template + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + OrderTag, 
+ OrderTag) noexcept + { + CheckLoadMemoryOrder(); + CheckCasMemoryOrdering(); + typename OrderToTag::Type o; + return CompareExchangeStrong(storage, val, expected, o); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + RelaxedTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_RELAXED(_InterlockedCompareExchange8)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + AcquireTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_ACQUIRE(_InterlockedCompareExchange8)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_RELEASE(_InterlockedCompareExchange8)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + SeqCstTag) noexcept + { + return CasRet(expected, + ValAs(expected), + _InterlockedCompareExchange8(AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline T FetchAdd(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedExchangeAdd8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAdd(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedExchangeAdd8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAdd(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedExchangeAdd8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAdd(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedExchangeAdd8(AddrAs(storage), ValAs(val))); + } + + template + static inline T FetchSub(volatile T& storage, + T val, + OrderTag) noexcept + { + typename OrderTag::Type order; + return FetchAdd(storage, Negate(val), order); + } + + static inline T FetchAnd(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedAnd8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedAnd8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedAnd8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedAnd8(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedOr8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedOr8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedOr8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedOr8(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& 
storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedXor8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedXor8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedXor8)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedXor8(AddrAs(storage), ValAs(val))); + } +}; + +template +struct SelectIntrinsic +{ + RAD_S_ASSERT(sizeof(T) == 2); + using Type = short; + + static inline T Load(const volatile T& storage, RelaxedTag) noexcept + { + return static_cast(__iso_volatile_load16(AddrAs(storage))); + } + + template + static inline T Load(const volatile T& storage, OrderTag) noexcept + { + CheckLoadMemoryOrder(); + T ret = static_cast(__iso_volatile_load16(AddrAs(storage))); + RAD_MEM_BARRIER; + return ret; + } + + static inline void Store(volatile T& storage, T val, RelaxedTag) noexcept + { + __iso_volatile_store16(AddrAs(storage), ValAs(val)); + } + + template + static inline void Store(volatile T& storage, + T val, + OrderTag) noexcept + { + CheckStoreMemoryOrder(); + RAD_MEM_BARRIER; + __iso_volatile_store16(AddrAs(storage), ValAs(val)); + } + + static inline T Exchange(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedExchange16)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedExchange16)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedExchange16)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedExchange16(AddrAs(storage), ValAs(val))); + } + + static inline bool CasRet(T& expected, Type comparand, Type old) noexcept + { + if (old != comparand) + { + expected = static_cast(old); + return false; + } + return true; + } + + template + static inline bool CompareExchangeWeak(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + // MSVC does not provide a weak CAS intrinsic for any platform + Ts success; + Tf fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag, + ReleaseTag) noexcept + { + ReleaseTag success; + RelaxedTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong( + volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept + { + AcqRelTag success; + AcquireTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + template + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + CheckLoadMemoryOrder(); + CheckCasMemoryOrdering(); + typename OrderToTag::Type o; + return CompareExchangeStrong(storage, val, expected, o); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + RelaxedTag) noexcept + { + return CasRet(expected, 
+ ValAs(expected), + RAD_INTRIN_RELAXED(_InterlockedCompareExchange16)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + AcquireTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_ACQUIRE(_InterlockedCompareExchange16)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_RELEASE(_InterlockedCompareExchange16)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + SeqCstTag) noexcept + { + return CasRet(expected, + ValAs(expected), + _InterlockedCompareExchange16(AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline T FetchAdd(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast( + RAD_INTRIN_RELAXED(_InterlockedExchangeAdd16)(AddrAs(storage), + ValAs(val))); + } + + static inline T FetchAdd(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast( + RAD_INTRIN_ACQUIRE(_InterlockedExchangeAdd16)(AddrAs(storage), + ValAs(val))); + } + + static inline T FetchAdd(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast( + RAD_INTRIN_RELEASE(_InterlockedExchangeAdd16)(AddrAs(storage), + ValAs(val))); + } + + static inline T FetchAdd(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedExchangeAdd16(AddrAs(storage), ValAs(val))); + } + + template + static inline T FetchSub(volatile T& storage, + T val, + OrderTag) noexcept + { + typename OrderTag::Type order; + return FetchAdd(storage, Negate(val), order); + } + + static inline T FetchAnd(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedAnd16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedAnd16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedAnd16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedAnd16(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedOr16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedOr16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedOr16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedOr16(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedXor16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + 
_InterlockedXor16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedXor16)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedXor16(AddrAs(storage), ValAs(val))); + } +}; + +template +struct SelectIntrinsic +{ + RAD_S_ASSERT(sizeof(T) == 4); + using Type = long; + + static inline T Load(const volatile T& storage, RelaxedTag) noexcept + { + return ValAs(__iso_volatile_load32(AddrAs(storage))); + } + + template + static inline T Load(const volatile T& storage, OrderTag) noexcept + { + CheckLoadMemoryOrder(); + T ret = ValAs(__iso_volatile_load32(AddrAs(storage))); + RAD_MEM_BARRIER; + return ret; + } + + static inline void Store(volatile T& storage, T val, RelaxedTag) noexcept + { + __iso_volatile_store32(AddrAs(storage), ValAs(val)); + } + + template + static inline void Store(volatile T& storage, + T val, + OrderTag) noexcept + { + CheckStoreMemoryOrder(); + RAD_MEM_BARRIER; + __iso_volatile_store32(AddrAs(storage), ValAs(val)); + } + + static inline T Exchange(volatile T& storage, T val, RelaxedTag) noexcept + { + return ValAs(RAD_INTRIN_RELAXED( + _InterlockedExchange)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, AcquireTag) noexcept + { + return ValAs(RAD_INTRIN_ACQUIRE( + _InterlockedExchange)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, ReleaseTag) noexcept + { + return ValAs(RAD_INTRIN_RELEASE( + _InterlockedExchange)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, SeqCstTag) noexcept + { + return ValAs( + _InterlockedExchange(AddrAs(storage), ValAs(val))); + } + + static inline bool CasRet(T& expected, Type comparand, Type old) noexcept + { + if (old != comparand) + { + expected = ValAs(old); + return false; + } + return true; + } + + template + static inline bool CompareExchangeWeak(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + // MSVC does not provide a weak CAS intrinsic for any platform + Ts success; + Tf fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag, + ReleaseTag) noexcept + { + ReleaseTag success; + RelaxedTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong( + volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept + { + AcqRelTag success; + AcquireTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + template + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + CheckLoadMemoryOrder(); + CheckCasMemoryOrdering(); + typename OrderToTag::Type o; + return CompareExchangeStrong(storage, val, expected, o); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + RelaxedTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_RELAXED(_InterlockedCompareExchange)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + AcquireTag) noexcept + { + return CasRet(expected, + ValAs(expected), + 
RAD_INTRIN_ACQUIRE(_InterlockedCompareExchange)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_RELEASE(_InterlockedCompareExchange)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + SeqCstTag) noexcept + { + return CasRet(expected, + ValAs(expected), + _InterlockedCompareExchange(AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + RelaxedTag) noexcept + { + return reinterpret_cast(RAD_INTRIN_RELAXED( + _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + AcquireTag) noexcept + { + return reinterpret_cast(RAD_INTRIN_ACQUIRE( + _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + ReleaseTag) noexcept + { + return reinterpret_cast(RAD_INTRIN_RELEASE( + _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedExchangeAdd(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + SeqCstTag) noexcept + { + return reinterpret_cast( + _InterlockedExchangeAdd(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchSub(volatile T& storage, + T val, + OrderTag) noexcept + { + typename OrderTag::Type order; + return FetchAdd(storage, Negate(val), order); + } + + template , int> = 0> + static inline T FetchSub(volatile T& storage, + ptrdiff_t val, + OrderTag) noexcept + { + typename OrderTag::Type order; + return FetchAdd(storage, Negate(val), order); + } + + static inline T FetchAnd(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedAnd)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedAnd)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedAnd)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedAnd(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedOr)(AddrAs(storage), 
ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedOr)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedOr)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedOr(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + _InterlockedXor)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + _InterlockedXor)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + _InterlockedXor)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + _InterlockedXor(AddrAs(storage), ValAs(val))); + } +}; + +template +struct SelectIntrinsic +{ + RAD_S_ASSERT(sizeof(T) == 8); + using Type = __int64; + + // Load and Store operations for 64-bit integrals on x86 (32-bit) will + // produce fild and flstp instructions for atomicity when compiling with + // /kernel. This is due to /kernel forcing IA86 and not allowing override + // with SSE support. This, unfortunately, can cause alignment issues at + // DISPATCH_LEVEL and bug check. + // + // The workaround for this is to use slower atomic exchange and cas. + // This will unfortunately result in all loads/stores on x86 enforcing + // sequential ordering with the associated performance impact. 
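// Illustrative sketch, not part of this patch: the fallback works because a
// compare-exchange whose comparand and exchange value are both 0 is a pure
// read. It returns the value currently stored, and the only case in which it
// writes anything stores back the 0 the location already held. That avoids
// the x87 fild/fistp sequence the compiler would otherwise emit for a plain
// 64-bit load or store on 32-bit x86 under /kernel.
//
//     LONG64 AtomicLoad64(volatile LONG64* address) noexcept
//     {
//         // Returns *address atomically; never observably modifies it.
//         return InterlockedCompareExchange64(address, 0, 0);
//     }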
+ + static inline T Load(const volatile T& storage, RelaxedTag) noexcept + { +#if RAD_I386 && RAD_KERNEL_MODE + return InterlockedCompareExchange64((volatile LONG64*)&storage, 0, 0); +#else + return ValAs(__iso_volatile_load64(AddrAs(storage))); +#endif + } + + template + static inline T Load(const volatile T& storage, OrderTag) noexcept + { + CheckLoadMemoryOrder(); +#if RAD_I386 && RAD_KERNEL_MODE + return InterlockedCompareExchange64((volatile LONG64*)&storage, 0, 0); +#else + T ret = ValAs(__iso_volatile_load64(AddrAs(storage))); + RAD_MEM_BARRIER; + return ret; +#endif + } + + static inline void Store(volatile T& storage, T val, RelaxedTag) noexcept + { +#if RAD_I386 && RAD_KERNEL_MODE + InterlockedExchange64(AddrAs(storage), val); +#else + __iso_volatile_store64(AddrAs(storage), ValAs(val)); +#endif + } + + template + static inline void Store(volatile T& storage, + T val, + OrderTag) noexcept + { + CheckStoreMemoryOrder(); +#if RAD_I386 && RAD_KERNEL_MODE + InterlockedExchange64(AddrAs(storage), val); +#else + RAD_MEM_BARRIER; + __iso_volatile_store64(AddrAs(storage), ValAs(val)); +#endif + } + + static inline T Exchange(volatile T& storage, T val, RelaxedTag) noexcept + { + return ValAs(RAD_INTRIN_RELAXED( + InterlockedExchange64)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, AcquireTag) noexcept + { + return ValAs(RAD_INTRIN_ACQUIRE( + InterlockedExchange64)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, ReleaseTag) noexcept + { + return ValAs(RAD_INTRIN_RELEASE( + InterlockedExchange64)(AddrAs(storage), ValAs(val))); + } + + static inline T Exchange(volatile T& storage, T val, SeqCstTag) noexcept + { + return ValAs( + InterlockedExchange64(AddrAs(storage), ValAs(val))); + } + + static inline bool CasRet(T& expected, Type comparand, Type old) noexcept + { + if (old != comparand) + { + expected = ValAs(old); + return false; + } + return true; + } + + template + static inline bool CompareExchangeWeak(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + // MSVC does not provide a weak CAS intrinsic for any platform + Ts success; + Tf fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag, + ReleaseTag) noexcept + { + ReleaseTag success; + RelaxedTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong( + volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept + { + AcqRelTag success; + AcquireTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + template + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + CheckLoadMemoryOrder(); + CheckCasMemoryOrdering(); + typename OrderToTag::Type o; + return CompareExchangeStrong(storage, val, expected, o); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + RelaxedTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_RELAXED(_InterlockedCompareExchange64)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + AcquireTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_ACQUIRE(_InterlockedCompareExchange64)( + 
AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag) noexcept + { + return CasRet(expected, + ValAs(expected), + RAD_INTRIN_RELEASE(_InterlockedCompareExchange64)( + AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + SeqCstTag) noexcept + { + return CasRet(expected, + ValAs(expected), + _InterlockedCompareExchange64(AddrAs(storage), + ValAs(val), + ValAs(expected))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + RelaxedTag) noexcept + { + return reinterpret_cast(RAD_INTRIN_RELAXED( + InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + AcquireTag) noexcept + { + return reinterpret_cast(RAD_INTRIN_ACQUIRE( + InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + ReleaseTag) noexcept + { + return reinterpret_cast(RAD_INTRIN_RELEASE( + InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + InterlockedExchangeAdd64(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + SeqCstTag) noexcept + { + return reinterpret_cast( + InterlockedExchangeAdd64(AddrAs(storage), ValAs(val))); + } + + template , int> = 0> + static inline T FetchSub(volatile T& storage, + T val, + OrderTag) noexcept + { + typename OrderTag::Type order; + return FetchAdd(storage, Negate(val), order); + } + + template , int> = 0> + static inline T FetchSub(volatile T& storage, + ptrdiff_t val, + OrderTag) noexcept + { + typename OrderTag::Type order; + return FetchAdd(storage, Negate(val), order); + } + + static inline T FetchAnd(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + InterlockedAnd64)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + InterlockedAnd64)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + InterlockedAnd64)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchAnd(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + InterlockedAnd64(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + InterlockedOr64)(AddrAs(storage), ValAs(val))); + } + + static inline T 
FetchOr(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + InterlockedOr64)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + InterlockedOr64)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchOr(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + InterlockedOr64(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, RelaxedTag) noexcept + { + return static_cast(RAD_INTRIN_RELAXED( + InterlockedXor64)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, AcquireTag) noexcept + { + return static_cast(RAD_INTRIN_ACQUIRE( + InterlockedXor64)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, ReleaseTag) noexcept + { + return static_cast(RAD_INTRIN_RELEASE( + InterlockedXor64)(AddrAs(storage), ValAs(val))); + } + + static inline T FetchXor(volatile T& storage, T val, SeqCstTag) noexcept + { + return static_cast( + InterlockedXor64(AddrAs(storage), ValAs(val))); + } +}; + +// NOLINTEND(misc-include-cleaner) +#elif defined(RAD_GCC_VERSION) || defined(RAD_CLANG_VERSION) +template +struct SelectIntrinsic +{ + RAD_S_ASSERTMSG( + (IsIntegral && sizeof(T) == Size && + (Size == 1 || Size == 2 || Size == 4 || Size == 8)) || + (IsPointer && Size == sizeof(void*)), + "rad::detail::atomic::SelectIntrinsic not supported for this type"); + + template + static inline T Load(const volatile T& storage, OrderTag) noexcept + { + CheckLoadMemoryOrder(); + constexpr int order = static_cast(TOrder::Order); + return __atomic_load_n(&storage, order); + } + + template + static inline void Store(volatile T& storage, + T val, + OrderTag) noexcept + { + CheckStoreMemoryOrder(); + constexpr int order = static_cast(TOrder::Order); + __atomic_store_n(&storage, val, order); + } + + template + static inline T Exchange(volatile T& storage, + T val, + OrderTag) noexcept + { + constexpr int order = static_cast(TOrder::Order); + return __atomic_exchange_n(&storage, val, order); + } + + static inline bool CompareExchangeWeak(volatile T& storage, + T val, + T& expected, + ReleaseTag, + ReleaseTag) noexcept + { + ReleaseTag success; + RelaxedTag fail; + return CompareExchangeWeak(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeWeak( + volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept + { + AcqRelTag success; + AcquireTag fail; + return CompareExchangeWeak(storage, val, expected, success, fail); + } + + template + static inline bool CompareExchangeWeak(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + CheckLoadMemoryOrder(); + CheckCasMemoryOrdering(); + constexpr int success = static_cast(Ts::Order); + constexpr int fail = static_cast(Tf::Order); + return __atomic_compare_exchange_n(&storage, + &expected, + val, + true, + success, + fail); + } + + static inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + ReleaseTag, + ReleaseTag) noexcept + { + ReleaseTag success; + RelaxedTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + static inline bool CompareExchangeStrong( + volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept + { + AcqRelTag success; + AcquireTag fail; + return CompareExchangeStrong(storage, val, expected, success, fail); + } + + template + static 
inline bool CompareExchangeStrong(volatile T& storage, + T val, + T& expected, + OrderTag, + OrderTag) noexcept + { + CheckLoadMemoryOrder(); + CheckCasMemoryOrdering(); + constexpr int success = static_cast(Ts::Order); + constexpr int fail = static_cast(Tf::Order); + return __atomic_compare_exchange_n(&storage, + &expected, + val, + false, + success, + fail); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + T val, + OrderTag) noexcept + { + constexpr int order = static_cast(TOrder::Order); + return __atomic_fetch_add(&storage, val, order); + } + + template , int> = 0> + static inline T FetchAdd(volatile T& storage, + ptrdiff_t val, + OrderTag) noexcept + { + constexpr int order = static_cast(TOrder::Order); + return __atomic_fetch_add(&storage, val, order); + } + + template , int> = 0> + static inline T FetchSub(volatile T& storage, + T val, + OrderTag) noexcept + { + constexpr int order = static_cast(TOrder::Order); + return __atomic_fetch_sub(&storage, val, order); + } + + template , int> = 0> + static inline T FetchSub(volatile T& storage, + ptrdiff_t val, + OrderTag) noexcept + { + constexpr int order = static_cast(TOrder::Order); + return __atomic_fetch_sub(&storage, val, order); + } + + template + static inline T FetchAnd(volatile T& storage, + T val, + OrderTag) noexcept + { + constexpr int order = static_cast(TOrder::Order); + return __atomic_fetch_and(&storage, val, order); + } + + template + static inline T FetchOr(volatile T& storage, + T val, + OrderTag) noexcept + { + constexpr int order = static_cast(TOrder::Order); + return __atomic_fetch_or(&storage, val, order); + } + + template + static inline T FetchXor(volatile T& storage, + T val, + OrderTag) noexcept + { + constexpr int order = static_cast(TOrder::Order); + return __atomic_fetch_xor(&storage, val, order); + } +}; #endif } // namespace atomic diff --git a/radiant/detail/IntrinsicTraits.h b/radiant/detail/IntrinsicTraits.h index 58ce4dd..4b3590f 100644 --- a/radiant/detail/IntrinsicTraits.h +++ b/radiant/detail/IntrinsicTraits.h @@ -14,40 +14,35 @@ #pragma once -#ifdef RAD_MSC_VERSION -// MSVC does not seem to have something like __has_builtin, but -// also seems to define a superset of what the other compilers do -// in terms of intrinsics -#define __has_builtin(v) 1 -#endif +#include "radiant/TotallyRad.h" -#if __has_builtin(__is_enum) +#if RAD_HAS_BUILTIN(__is_enum) #define INTRINSIC_IS_ENUM(T) __is_enum(T) #else RAD_S_ASSERTMSG(false, "compiler does not support intrinsic __is_enum") #endif -#if __has_builtin(__is_base_of) +#if RAD_HAS_BUILTIN(__is_base_of) #define INTRINSIC_IS_BASE_OF(B, D) __is_base_of(B, D) #else RAD_S_ASSERTMSG(false, "compiler does not support intrinsic __is_base_of") #endif -#if __has_builtin(__is_empty) +#if RAD_HAS_BUILTIN(__is_empty) #define INTRINSIC_IS_EMPTY(T) __is_empty(T) #else RAD_S_ASSERTMSG(false, "compiler does not support intrinsic __is_empty") #endif -#if __has_builtin(__is_polymorphic) +#if RAD_HAS_BUILTIN(__is_polymorphic) #define INTRINSIC_IS_POLYMORPHIC(T) __is_polymorphic(T) #else RAD_S_ASSERTMSG(false, "compiler does not support intrinsic __is_polymorphic") #endif -#if __has_builtin(__is_trivially_destructible) +#if RAD_HAS_BUILTIN(__is_trivially_destructible) #define INTRINSIC_IS_TRIVIALLY_DESTRUCTIBLE(T) __is_trivially_destructible(T) -#elif __has_builtin(__has_trivial_destructor) +#elif RAD_HAS_BUILTIN(__has_trivial_destructor) #define INTRINSIC_IS_TRIVIALLY_DESTRUCTIBLE(T) __has_trivial_destructor(T) #else RAD_S_ASSERTMSG(false, @@ -55,27 
+50,27 @@ RAD_S_ASSERTMSG(false,
                 "__is_trivially_destructible or __has_trivial_destructor");
 #endif

-#if __has_builtin(__is_assignable)
+#if RAD_HAS_BUILTIN(__is_assignable)
 #define INTRINSIC_IS_ASSIGNABLE(L, R) __is_assignable(L, R)
 #else
 RAD_S_ASSERTMSG(false, "compiler does not support intrinsic __is_assignable");
 #endif

-#if __has_builtin(__is_constructible)
+#if RAD_HAS_BUILTIN(__is_constructible)
 #define INTRINSIC_IS_CONSTRUCTIBLE(...) __is_constructible(__VA_ARGS__)
 #else
 RAD_S_ASSERTMSG(false,
                 "compiler does not support intrinsic __is_constructible");
 #endif

-#if __has_builtin(__is_trivially_assignable)
+#if RAD_HAS_BUILTIN(__is_trivially_assignable)
 #define INTRINSIC_IS_TRIVIALLY_ASSIGNABLE(L, R) __is_trivially_assignable(L, R)
 #else
 RAD_S_ASSERTMSG(
     false, "compiler does not support intrinsic __is_trivially_assignable");
 #endif

-#if __has_builtin(__is_trivially_constructible)
+#if RAD_HAS_BUILTIN(__is_trivially_constructible)
 #define INTRINSIC_IS_TRIVIALLY_CONSTRUCTIBLE(...) \
     __is_trivially_constructible(__VA_ARGS__)
 #else
@@ -83,14 +78,14 @@ RAD_S_ASSERTMSG(
     false, "compiler does not support intrinsic __is_trivially_constructible");
 #endif

-#if __has_builtin(__is_nothrow_assignable) || defined(RAD_GCC_VERSION)
+#if RAD_HAS_BUILTIN(__is_nothrow_assignable) || defined(RAD_GCC_VERSION)
 #define INTRINSIC_IS_NOTHROW_ASSIGNABLE(L, R) __is_nothrow_assignable(L, R)
 #else
 RAD_S_ASSERTMSG(false,
                 "compiler does not support intrinsic __is_nothrow_assignable");
 #endif

-#if __has_builtin(__is_nothrow_constructible) || defined(RAD_GCC_VERSION)
+#if RAD_HAS_BUILTIN(__is_nothrow_constructible) || defined(RAD_GCC_VERSION)
 #define INTRINSIC_IS_NOTHROW_CONSTRUCTIBLE(...) \
     __is_nothrow_constructible(__VA_ARGS__)
 #else
@@ -98,7 +93,7 @@ RAD_S_ASSERTMSG(
     false, "compiler does not support intrinsic __is_nothrow_constructible");
 #endif

-#if __has_builtin(__is_trivial)
+#if RAD_HAS_BUILTIN(__is_trivial)
 #define IS_TRIVIAL_IMPL(T) __is_trivial(T)
 #else
 #define IS_TRIVIAL_IMPL(T) \
@@ -106,7 +101,7 @@ RAD_S_ASSERTMSG(
     is_trivially_default_constructible::value)
 #endif

-#if __has_builtin(__has_virtual_destructor)
+#if RAD_HAS_BUILTIN(__has_virtual_destructor)
 #define INTRINSIC_HAS_VIRTUAL_DESTRUCTOR(T) __has_virtual_destructor(T)
 #else
 RAD_S_ASSERTMSG(false,
diff --git a/radiant/detail/Meta.h b/radiant/detail/Meta.h
index 8b2f395..8d1d832 100644
--- a/radiant/detail/Meta.h
+++ b/radiant/detail/Meta.h
@@ -14,7 +14,10 @@

 #pragma once

-#include
+#include "radiant/TotallyRad.h"
+
+#include
+#include

 namespace rad
diff --git a/radiant/detail/StdTypeTraits.h b/radiant/detail/StdTypeTraits.h
index b5ce6d9..dbb8158 100644
--- a/radiant/detail/StdTypeTraits.h
+++ b/radiant/detail/StdTypeTraits.h
@@ -14,6 +14,13 @@

 #pragma once

+#include "radiant/TotallyRad.h"
+#include "radiant/detail/IntrinsicTraits.h"
+#include "radiant/detail/Meta.h"
+
+#include
+#include
+
 #define USING_STL_TYPETRAITS 0

 #if USING_STL_TYPETRAITS
@@ -117,10 +124,6 @@ typename add_rvalue_reference::type declval() noexcept

 #else

-#include "radiant/TotallyRad.h"
-#include "radiant/detail/Meta.h"
-#include "radiant/detail/IntrinsicTraits.h"
-
 namespace rad
 {

@@ -302,7 +305,7 @@ struct remove_all_extents
     using type = typename remove_all_extents::type;
 };

-template
+template
 struct remove_all_extents
 {
     using type = typename remove_all_extents::type;
diff --git a/radiant/detail/VectorOperations.h b/radiant/detail/VectorOperations.h
index dbc61b8..d5285f3 100644
--- a/radiant/detail/VectorOperations.h
+++ b/radiant/detail/VectorOperations.h
@@ -14,10
+14,14 @@ #pragma once +#include "radiant/TotallyRad.h" +#include "radiant/Algorithm.h" // NOLINT(misc-include-cleaner) +#include "radiant/Iterator.h" #include "radiant/Res.h" -#include "radiant/Utility.h" #include "radiant/Span.h" -#include "radiant/Algorithm.h" +#include "radiant/TypeTraits.h" + +#include namespace rad { diff --git a/radiant/detail/gcc/AtomicIntrinsics.h b/radiant/detail/gcc/AtomicIntrinsics.h deleted file mode 100644 index 58cc221..0000000 --- a/radiant/detail/gcc/AtomicIntrinsics.h +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2023 The Radiant Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -template -struct SelectIntrinsic -{ - RAD_S_ASSERTMSG( - (IsIntegral && sizeof(T) == Size && - (Size == 1 || Size == 2 || Size == 4 || Size == 8)) || - (IsPointer && Size == sizeof(void*)), - "rad::detail::atomic::SelectIntrinsic not supported for this type"); - - template - static inline T Load(const volatile T& storage, OrderTag) noexcept - { - CheckLoadMemoryOrder(); - constexpr int order = static_cast(TOrder::Order); - return __atomic_load_n(&storage, order); - } - - template - static inline void Store(volatile T& storage, - T val, - OrderTag) noexcept - { - CheckStoreMemoryOrder(); - constexpr int order = static_cast(TOrder::Order); - __atomic_store_n(&storage, val, order); - } - - template - static inline T Exchange(volatile T& storage, - T val, - OrderTag) noexcept - { - constexpr int order = static_cast(TOrder::Order); - return __atomic_exchange_n(&storage, val, order); - } - - static inline bool CompareExchangeWeak(volatile T& storage, - T val, - T& expected, - ReleaseTag, - ReleaseTag) noexcept - { - ReleaseTag success; - RelaxedTag fail; - return CompareExchangeWeak(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeWeak( - volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept - { - AcqRelTag success; - AcquireTag fail; - return CompareExchangeWeak(storage, val, expected, success, fail); - } - - template - static inline bool CompareExchangeWeak(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - CheckLoadMemoryOrder(); - CheckCasMemoryOrdering(); - constexpr int success = static_cast(Ts::Order); - constexpr int fail = static_cast(Tf::Order); - return __atomic_compare_exchange_n(&storage, - &expected, - val, - true, - success, - fail); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag, - ReleaseTag) noexcept - { - ReleaseTag success; - RelaxedTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeStrong( - volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept - { - AcqRelTag success; - AcquireTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - template - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - 
CheckLoadMemoryOrder(); - CheckCasMemoryOrdering(); - constexpr int success = static_cast(Ts::Order); - constexpr int fail = static_cast(Tf::Order); - return __atomic_compare_exchange_n(&storage, - &expected, - val, - false, - success, - fail); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - T val, - OrderTag) noexcept - { - constexpr int order = static_cast(TOrder::Order); - return __atomic_fetch_add(&storage, val, order); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - OrderTag) noexcept - { - constexpr int order = static_cast(TOrder::Order); - return __atomic_fetch_add(&storage, val, order); - } - - template , int> = 0> - static inline T FetchSub(volatile T& storage, - T val, - OrderTag) noexcept - { - constexpr int order = static_cast(TOrder::Order); - return __atomic_fetch_sub(&storage, val, order); - } - - template , int> = 0> - static inline T FetchSub(volatile T& storage, - ptrdiff_t val, - OrderTag) noexcept - { - constexpr int order = static_cast(TOrder::Order); - return __atomic_fetch_sub(&storage, val, order); - } - - template - static inline T FetchAnd(volatile T& storage, - T val, - OrderTag) noexcept - { - constexpr int order = static_cast(TOrder::Order); - return __atomic_fetch_and(&storage, val, order); - } - - template - static inline T FetchOr(volatile T& storage, - T val, - OrderTag) noexcept - { - constexpr int order = static_cast(TOrder::Order); - return __atomic_fetch_or(&storage, val, order); - } - - template - static inline T FetchXor(volatile T& storage, - T val, - OrderTag) noexcept - { - constexpr int order = static_cast(TOrder::Order); - return __atomic_fetch_xor(&storage, val, order); - } -}; diff --git a/radiant/detail/msvc/AtomicIntrinsics.h b/radiant/detail/msvc/AtomicIntrinsics.h deleted file mode 100644 index 4a29902..0000000 --- a/radiant/detail/msvc/AtomicIntrinsics.h +++ /dev/null @@ -1,1271 +0,0 @@ -// Copyright 2023 The Radiant Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#pragma once - -#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) -#define RAD_MEM_BARRIER __dmb(0xb); // _ARM_BARRIER_ISH / _ARM64_BARRIER_ISH -#define RAD_INTRIN_RELAXED(Intrin) RAD_CONCAT(Intrin, _nf) -#define RAD_INTRIN_ACQUIRE(Intrin) RAD_CONCAT(Intrin, _acq) -#define RAD_INTRIN_RELEASE(Intrin) RAD_CONCAT(Intrin, _rel) -#elif defined(_M_IX86) || defined(_M_AMD64) -#define RAD_MEM_BARRIER \ - _Pragma("warning(push)") _Pragma("warning(disable : 4996)") \ - _ReadWriteBarrier() _Pragma("warning(pop)") -#define RAD_INTRIN_RELAXED(Intrin) Intrin -#define RAD_INTRIN_ACQUIRE(Intrin) Intrin -#define RAD_INTRIN_RELEASE(Intrin) Intrin -#endif - -// clang-format off -RAD_INLINE_VAR constexpr MemoryOrder combinedOrders[6][6] = { - {MemoryOrder::Relaxed, MemoryOrder::Consume, MemoryOrder::Acquire, MemoryOrder::Release, MemoryOrder::AcqRel, MemoryOrder::SeqCst}, - {MemoryOrder::Consume, MemoryOrder::Consume, MemoryOrder::Acquire, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::SeqCst}, - {MemoryOrder::Acquire, MemoryOrder::Acquire, MemoryOrder::Acquire, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::SeqCst}, - {MemoryOrder::Release, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::Release, MemoryOrder::AcqRel, MemoryOrder::SeqCst}, - {MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::AcqRel, MemoryOrder::SeqCst}, - {MemoryOrder::SeqCst, MemoryOrder::SeqCst, MemoryOrder::SeqCst, MemoryOrder::SeqCst, MemoryOrder::SeqCst, MemoryOrder::SeqCst}}; - -// clang-format on - -constexpr inline MemoryOrder CombineMemoryOrders(MemoryOrder success, - MemoryOrder fail) noexcept -{ - return combinedOrders[static_cast(success)][static_cast(fail)]; -} - -template -constexpr inline T Negate(const T val) noexcept -{ - return static_cast(0U - static_cast>(val)); -} - -template -struct SelectIntrinsic -{ - RAD_S_ASSERTMSG( - Size == 0, - "rad::detail::atomic::SelectIntrinsic not supported for this type"); -}; - -template -struct SelectIntrinsic -{ - RAD_S_ASSERT(sizeof(T) == 1); - using Type = char; - - static inline T Load(const volatile T& storage, RelaxedTag) noexcept - { - return static_cast(__iso_volatile_load8(AddrAs(storage))); - } - - template - static inline T Load(const volatile T& storage, OrderTag) noexcept - { - CheckLoadMemoryOrder(); - T ret = static_cast(__iso_volatile_load8(AddrAs(storage))); - RAD_MEM_BARRIER; - return ret; - } - - static inline void Store(volatile T& storage, T val, RelaxedTag) noexcept - { - __iso_volatile_store8(AddrAs(storage), ValAs(val)); - } - - template - static inline void Store(volatile T& storage, - T val, - OrderTag) noexcept - { - CheckStoreMemoryOrder(); - RAD_MEM_BARRIER; - __iso_volatile_store8(AddrAs(storage), ValAs(val)); - } - - static inline T Exchange(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedExchange8)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedExchange8)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedExchange8)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedExchange8(AddrAs(storage), ValAs(val))); - } - - static inline bool CasRet(T& expected, Type comparand, Type 
old) noexcept - { - if (old != comparand) - { - expected = static_cast(old); - return false; - } - return true; - } - - template - static inline bool CompareExchangeWeak(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - // MSVC does not provide a weak CAS intrinsic for any platform - Ts success; - Tf fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag, - ReleaseTag) noexcept - { - ReleaseTag success; - RelaxedTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeStrong( - volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept - { - AcqRelTag success; - AcquireTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - template - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - CheckLoadMemoryOrder(); - CheckCasMemoryOrdering(); - typename OrderToTag::Type o; - return CompareExchangeStrong(storage, val, expected, o); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - RelaxedTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_RELAXED(_InterlockedCompareExchange8)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - AcquireTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_ACQUIRE(_InterlockedCompareExchange8)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_RELEASE(_InterlockedCompareExchange8)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - SeqCstTag) noexcept - { - return CasRet(expected, - ValAs(expected), - _InterlockedCompareExchange8(AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline T FetchAdd(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedExchangeAdd8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAdd(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedExchangeAdd8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAdd(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedExchangeAdd8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAdd(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedExchangeAdd8(AddrAs(storage), ValAs(val))); - } - - template - static inline T FetchSub(volatile T& storage, - T val, - OrderTag) noexcept - { - typename OrderTag::Type order; - return FetchAdd(storage, Negate(val), order); - } - - static inline T FetchAnd(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedAnd8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedAnd8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& 
storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedAnd8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedAnd8(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedOr8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedOr8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedOr8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedOr8(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedXor8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedXor8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedXor8)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedXor8(AddrAs(storage), ValAs(val))); - } -}; - -template -struct SelectIntrinsic -{ - RAD_S_ASSERT(sizeof(T) == 2); - using Type = short; - - static inline T Load(const volatile T& storage, RelaxedTag) noexcept - { - return static_cast(__iso_volatile_load16(AddrAs(storage))); - } - - template - static inline T Load(const volatile T& storage, OrderTag) noexcept - { - CheckLoadMemoryOrder(); - T ret = static_cast(__iso_volatile_load16(AddrAs(storage))); - RAD_MEM_BARRIER; - return ret; - } - - static inline void Store(volatile T& storage, T val, RelaxedTag) noexcept - { - __iso_volatile_store16(AddrAs(storage), ValAs(val)); - } - - template - static inline void Store(volatile T& storage, - T val, - OrderTag) noexcept - { - CheckStoreMemoryOrder(); - RAD_MEM_BARRIER; - __iso_volatile_store16(AddrAs(storage), ValAs(val)); - } - - static inline T Exchange(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedExchange16)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedExchange16)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedExchange16)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedExchange16(AddrAs(storage), ValAs(val))); - } - - static inline bool CasRet(T& expected, Type comparand, Type old) noexcept - { - if (old != comparand) - { - expected = static_cast(old); - return false; - } - return true; - } - - template - static inline bool CompareExchangeWeak(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - // MSVC does not provide a weak CAS intrinsic for any 
platform - Ts success; - Tf fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag, - ReleaseTag) noexcept - { - ReleaseTag success; - RelaxedTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeStrong( - volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept - { - AcqRelTag success; - AcquireTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - template - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - CheckLoadMemoryOrder(); - CheckCasMemoryOrdering(); - typename OrderToTag::Type o; - return CompareExchangeStrong(storage, val, expected, o); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - RelaxedTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_RELAXED(_InterlockedCompareExchange16)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - AcquireTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_ACQUIRE(_InterlockedCompareExchange16)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_RELEASE(_InterlockedCompareExchange16)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - SeqCstTag) noexcept - { - return CasRet(expected, - ValAs(expected), - _InterlockedCompareExchange16(AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline T FetchAdd(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast( - RAD_INTRIN_RELAXED(_InterlockedExchangeAdd16)(AddrAs(storage), - ValAs(val))); - } - - static inline T FetchAdd(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast( - RAD_INTRIN_ACQUIRE(_InterlockedExchangeAdd16)(AddrAs(storage), - ValAs(val))); - } - - static inline T FetchAdd(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast( - RAD_INTRIN_RELEASE(_InterlockedExchangeAdd16)(AddrAs(storage), - ValAs(val))); - } - - static inline T FetchAdd(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedExchangeAdd16(AddrAs(storage), ValAs(val))); - } - - template - static inline T FetchSub(volatile T& storage, - T val, - OrderTag) noexcept - { - typename OrderTag::Type order; - return FetchAdd(storage, Negate(val), order); - } - - static inline T FetchAnd(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedAnd16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedAnd16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedAnd16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedAnd16(AddrAs(storage), ValAs(val))); - 
} - - static inline T FetchOr(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedOr16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedOr16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedOr16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedOr16(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedXor16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedXor16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedXor16)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedXor16(AddrAs(storage), ValAs(val))); - } -}; - -template -struct SelectIntrinsic -{ - RAD_S_ASSERT(sizeof(T) == 4); - using Type = long; - - static inline T Load(const volatile T& storage, RelaxedTag) noexcept - { - return ValAs(__iso_volatile_load32(AddrAs(storage))); - } - - template - static inline T Load(const volatile T& storage, OrderTag) noexcept - { - CheckLoadMemoryOrder(); - T ret = ValAs(__iso_volatile_load32(AddrAs(storage))); - RAD_MEM_BARRIER; - return ret; - } - - static inline void Store(volatile T& storage, T val, RelaxedTag) noexcept - { - __iso_volatile_store32(AddrAs(storage), ValAs(val)); - } - - template - static inline void Store(volatile T& storage, - T val, - OrderTag) noexcept - { - CheckStoreMemoryOrder(); - RAD_MEM_BARRIER; - __iso_volatile_store32(AddrAs(storage), ValAs(val)); - } - - static inline T Exchange(volatile T& storage, T val, RelaxedTag) noexcept - { - return ValAs(RAD_INTRIN_RELAXED( - _InterlockedExchange)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, AcquireTag) noexcept - { - return ValAs(RAD_INTRIN_ACQUIRE( - _InterlockedExchange)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, ReleaseTag) noexcept - { - return ValAs(RAD_INTRIN_RELEASE( - _InterlockedExchange)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, SeqCstTag) noexcept - { - return ValAs( - _InterlockedExchange(AddrAs(storage), ValAs(val))); - } - - static inline bool CasRet(T& expected, Type comparand, Type old) noexcept - { - if (old != comparand) - { - expected = ValAs(old); - return false; - } - return true; - } - - template - static inline bool CompareExchangeWeak(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - // MSVC does not provide a weak CAS intrinsic for any platform - Ts success; - Tf fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag, - ReleaseTag) noexcept - { - ReleaseTag success; - RelaxedTag fail; - return CompareExchangeStrong(storage, val, expected, 
success, fail); - } - - static inline bool CompareExchangeStrong( - volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept - { - AcqRelTag success; - AcquireTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - template - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - CheckLoadMemoryOrder(); - CheckCasMemoryOrdering(); - typename OrderToTag::Type o; - return CompareExchangeStrong(storage, val, expected, o); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - RelaxedTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_RELAXED(_InterlockedCompareExchange)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - AcquireTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_ACQUIRE(_InterlockedCompareExchange)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_RELEASE(_InterlockedCompareExchange)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - SeqCstTag) noexcept - { - return CasRet(expected, - ValAs(expected), - _InterlockedCompareExchange(AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - RelaxedTag) noexcept - { - return reinterpret_cast(RAD_INTRIN_RELAXED( - _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - AcquireTag) noexcept - { - return reinterpret_cast(RAD_INTRIN_ACQUIRE( - _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - ReleaseTag) noexcept - { - return reinterpret_cast(RAD_INTRIN_RELEASE( - _InterlockedExchangeAdd)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedExchangeAdd(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - SeqCstTag) noexcept - { - return reinterpret_cast( - _InterlockedExchangeAdd(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchSub(volatile T& storage, - T val, - OrderTag) noexcept - { - typename OrderTag::Type order; - return FetchAdd(storage, Negate(val), order); - } - - template , int> = 0> - static inline T FetchSub(volatile T& 
storage, - ptrdiff_t val, - OrderTag) noexcept - { - typename OrderTag::Type order; - return FetchAdd(storage, Negate(val), order); - } - - static inline T FetchAnd(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedAnd)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedAnd)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedAnd)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedAnd(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedOr)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedOr)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedOr)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedOr(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - _InterlockedXor)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - _InterlockedXor)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - _InterlockedXor)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - _InterlockedXor(AddrAs(storage), ValAs(val))); - } -}; - -template -struct SelectIntrinsic -{ - RAD_S_ASSERT(sizeof(T) == 8); - using Type = __int64; - - // Load and Store operations for 64-bit integrals on x86 (32-bit) will - // produce fild and fistp instructions for atomicity when compiling with - // /kernel. This is due to /kernel forcing IA86 and not allowing override - // with SSE support. This, unfortunately, can cause alignment issues at - // DISPATCH_LEVEL and bug check. - // - // The workaround for this is to use slower atomic exchange and cas. - // This will unfortunately result in all loads/stores on x86 enforcing - // sequential ordering with the associated performance impact.
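// A minimal editorial sketch (not part of the deleted file) of the
// workaround described above, assuming the user-mode declarations from
// windows.h; kernel-mode code gets the same Interlocked* APIs from wdm.h.
#include <windows.h>

// 64-bit atomic load on 32-bit x86: a compare-exchange with identical
// comparand and exchange values never modifies storage, but returns its
// current value atomically (and acts as a full barrier).
inline __int64 LoadViaCas(volatile __int64& storage) noexcept
{
    return InterlockedCompareExchange64(&storage, 0, 0);
}

// 64-bit atomic store: exchange and discard the previous value; also a
// full barrier, hence the sequential-ordering cost noted above.
inline void StoreViaExchange(volatile __int64& storage, __int64 val) noexcept
{
    (void)InterlockedExchange64(&storage, val);
}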
- - static inline T Load(const volatile T& storage, RelaxedTag) noexcept - { -#if RAD_I386 && RAD_KERNEL_MODE - return InterlockedCompareExchange64((volatile LONG64*)&storage, 0, 0); -#else - return ValAs(__iso_volatile_load64(AddrAs(storage))); -#endif - } - - template - static inline T Load(const volatile T& storage, OrderTag) noexcept - { - CheckLoadMemoryOrder(); -#if RAD_I386 && RAD_KERNEL_MODE - return InterlockedCompareExchange64((volatile LONG64*)&storage, 0, 0); -#else - T ret = ValAs(__iso_volatile_load64(AddrAs(storage))); - RAD_MEM_BARRIER; - return ret; -#endif - } - - static inline void Store(volatile T& storage, T val, RelaxedTag) noexcept - { -#if RAD_I386 && RAD_KERNEL_MODE - InterlockedExchange64(AddrAs(storage), val); -#else - __iso_volatile_store64(AddrAs(storage), ValAs(val)); -#endif - } - - template - static inline void Store(volatile T& storage, - T val, - OrderTag) noexcept - { - CheckStoreMemoryOrder(); -#if RAD_I386 && RAD_KERNEL_MODE - InterlockedExchange64(AddrAs(storage), val); -#else - RAD_MEM_BARRIER; - __iso_volatile_store64(AddrAs(storage), ValAs(val)); -#endif - } - - static inline T Exchange(volatile T& storage, T val, RelaxedTag) noexcept - { - return ValAs(RAD_INTRIN_RELAXED( - InterlockedExchange64)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, AcquireTag) noexcept - { - return ValAs(RAD_INTRIN_ACQUIRE( - InterlockedExchange64)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, ReleaseTag) noexcept - { - return ValAs(RAD_INTRIN_RELEASE( - InterlockedExchange64)(AddrAs(storage), ValAs(val))); - } - - static inline T Exchange(volatile T& storage, T val, SeqCstTag) noexcept - { - return ValAs( - InterlockedExchange64(AddrAs(storage), ValAs(val))); - } - - static inline bool CasRet(T& expected, Type comparand, Type old) noexcept - { - if (old != comparand) - { - expected = ValAs(old); - return false; - } - return true; - } - - template - static inline bool CompareExchangeWeak(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - // MSVC does not provide a weak CAS intrinsic for any platform - Ts success; - Tf fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag, - ReleaseTag) noexcept - { - ReleaseTag success; - RelaxedTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - static inline bool CompareExchangeStrong( - volatile T& storage, T val, T& expected, AcqRelTag, AcqRelTag) noexcept - { - AcqRelTag success; - AcquireTag fail; - return CompareExchangeStrong(storage, val, expected, success, fail); - } - - template - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - OrderTag, - OrderTag) noexcept - { - CheckLoadMemoryOrder(); - CheckCasMemoryOrdering(); - typename OrderToTag::Type o; - return CompareExchangeStrong(storage, val, expected, o); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - RelaxedTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_RELAXED(_InterlockedCompareExchange64)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - AcquireTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_ACQUIRE(_InterlockedCompareExchange64)( - 
AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - ReleaseTag) noexcept - { - return CasRet(expected, - ValAs(expected), - RAD_INTRIN_RELEASE(_InterlockedCompareExchange64)( - AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - static inline bool CompareExchangeStrong(volatile T& storage, - T val, - T& expected, - SeqCstTag) noexcept - { - return CasRet(expected, - ValAs(expected), - _InterlockedCompareExchange64(AddrAs(storage), - ValAs(val), - ValAs(expected))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - RelaxedTag) noexcept - { - return reinterpret_cast(RAD_INTRIN_RELAXED( - InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - AcquireTag) noexcept - { - return reinterpret_cast(RAD_INTRIN_ACQUIRE( - InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - ReleaseTag) noexcept - { - return reinterpret_cast(RAD_INTRIN_RELEASE( - InterlockedExchangeAdd64)(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - InterlockedExchangeAdd64(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchAdd(volatile T& storage, - ptrdiff_t val, - SeqCstTag) noexcept - { - return reinterpret_cast( - InterlockedExchangeAdd64(AddrAs(storage), ValAs(val))); - } - - template , int> = 0> - static inline T FetchSub(volatile T& storage, - T val, - OrderTag) noexcept - { - typename OrderTag::Type order; - return FetchAdd(storage, Negate(val), order); - } - - template , int> = 0> - static inline T FetchSub(volatile T& storage, - ptrdiff_t val, - OrderTag) noexcept - { - typename OrderTag::Type order; - return FetchAdd(storage, Negate(val), order); - } - - static inline T FetchAnd(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - InterlockedAnd64)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - InterlockedAnd64)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - InterlockedAnd64)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchAnd(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - InterlockedAnd64(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - InterlockedOr64)(AddrAs(storage), ValAs(val))); - } - - static inline T 
FetchOr(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - InterlockedOr64)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - InterlockedOr64)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchOr(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - InterlockedOr64(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, RelaxedTag) noexcept - { - return static_cast(RAD_INTRIN_RELAXED( - InterlockedXor64)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, AcquireTag) noexcept - { - return static_cast(RAD_INTRIN_ACQUIRE( - InterlockedXor64)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, ReleaseTag) noexcept - { - return static_cast(RAD_INTRIN_RELEASE( - InterlockedXor64)(AddrAs(storage), ValAs(val))); - } - - static inline T FetchXor(volatile T& storage, T val, SeqCstTag) noexcept - { - return static_cast( - InterlockedXor64(AddrAs(storage), ValAs(val))); - } -}; diff --git a/test/TestThrow.h b/test/TestThrow.h index a565656..002bc9d 100644 --- a/test/TestThrow.h +++ b/test/TestThrow.h @@ -14,8 +14,8 @@ #include "radiant/Utility.h" -#include #include +#include namespace radtest { diff --git a/test/test_EmptyOptimizedPair.cpp b/test/test_EmptyOptimizedPair.cpp index d8c399a..b452441 100644 --- a/test/test_EmptyOptimizedPair.cpp +++ b/test/test_EmptyOptimizedPair.cpp @@ -12,12 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "gtest/gtest.h" - -#include "test/TestAlloc.h" - #include "radiant/EmptyOptimizedPair.h" +#include "gtest/gtest.h" +#include "test/TestAlloc.h" #include struct Empty diff --git a/test/test_Integer.cpp b/test/test_Integer.cpp index c802959..6390c6f 100644 --- a/test/test_Integer.cpp +++ b/test/test_Integer.cpp @@ -12,9 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "gtest/gtest.h" #include "radiant/Integer.h" +#include "gtest/gtest.h" + // clang-format off RAD_S_ASSERT(noexcept(rad::i8())); diff --git a/test/test_Iterator.cpp b/test/test_Iterator.cpp index 516e433..b18de45 100644 --- a/test/test_Iterator.cpp +++ b/test/test_Iterator.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "gtest/gtest.h" - #include "radiant/Iterator.h" +#include "gtest/gtest.h" + struct Data { int value; diff --git a/test/test_Locks.cpp b/test/test_Locks.cpp index 7d2fecc..54a532c 100644 --- a/test/test_Locks.cpp +++ b/test/test_Locks.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "gtest/gtest.h" - #include "radiant/Locks.h" +#include "gtest/gtest.h" + struct TestLock { void LockExclusive() noexcept diff --git a/test/test_Result.cpp b/test/test_Result.cpp index 0de5070..c94b17e 100644 --- a/test/test_Result.cpp +++ b/test/test_Result.cpp @@ -19,14 +19,12 @@ #define RAD_ENABLE_NOTHROW_DTOR_ASSERTIONS 0 #define RAD_ENABLE_NOTHROW_MOVE_ASSERTIONS 0 -#include "gtest/gtest.h" - -#include "test/TestThrow.h" -#include "test/TestMove.h" - #include "radiant/Result.h" #include "radiant/Utility.h" +#include "gtest/gtest.h" +#include "test/TestMove.h" +#include "test/TestThrow.h" #include #include diff --git a/test/test_ScopeExit.cpp b/test/test_ScopeExit.cpp index 51c6d09..346e9a3 100644 --- a/test/test_ScopeExit.cpp +++ b/test/test_ScopeExit.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "gtest/gtest.h" - #include "radiant/ScopeExit.h" +#include "gtest/gtest.h" + namespace { diff --git a/test/test_SharedPtr.cpp b/test/test_SharedPtr.cpp index 36bc360..b8afe6b 100644 --- a/test/test_SharedPtr.cpp +++ b/test/test_SharedPtr.cpp @@ -13,9 +13,8 @@ // limitations under the License. #include "gtest/gtest.h" - -#include "test/TestThrow.h" #include "test/TestAlloc.h" +#include "test/TestThrow.h" #define RAD_DEFAULT_ALLOCATOR radtest::Allocator #include "radiant/SharedPtr.h" diff --git a/test/test_Span.cpp b/test/test_Span.cpp index e3cc554..9ae4668 100644 --- a/test/test_Span.cpp +++ b/test/test_Span.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "gtest/gtest.h" - #include "radiant/Span.h" #include "radiant/Utility.h" + +#include "gtest/gtest.h" #include "test/TestThrow.h" static constexpr auto g_SpanString = rad::MakeSpan("Span String"); diff --git a/test/test_StdTypeTraits.cpp b/test/test_StdTypeTraits.cpp index 565317e..d147c83 100644 --- a/test/test_StdTypeTraits.cpp +++ b/test/test_StdTypeTraits.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "gtest/gtest.h" - -#include "radiant/detail/StdTypeTraits.h" #include "radiant/detail/Meta.h" +#include "radiant/detail/StdTypeTraits.h" + +#include "gtest/gtest.h" namespace rad { diff --git a/test/test_TypeWrapper.cpp b/test/test_TypeWrapper.cpp index 6822ddf..90c44a7 100644 --- a/test/test_TypeWrapper.cpp +++ b/test/test_TypeWrapper.cpp @@ -19,12 +19,11 @@ #define RAD_ENABLE_NOTHROW_DTOR_ASSERTIONS 0 #define RAD_ENABLE_NOTHROW_MOVE_ASSERTIONS 0 -#include "gtest/gtest.h" +#include "radiant/TypeWrapper.h" +#include "gtest/gtest.h" #include "test/TestThrow.h" -#include "radiant/TypeWrapper.h" - template using TW = rad::TypeWrapper; using NTO = radtest::NonThrowingObject; diff --git a/test/test_UniqueResource.cpp b/test/test_UniqueResource.cpp index b73acc2..1b53b67 100644 --- a/test/test_UniqueResource.cpp +++ b/test/test_UniqueResource.cpp @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "gtest/gtest.h" - #include "radiant/UniqueResource.h" +#include "gtest/gtest.h" + static int g_IsValidCalls = 0; static int g_CloseCalls = 0; static int g_CloserCalls = 0; diff --git a/test/test_Utility.cpp b/test/test_Utility.cpp index 77a8876..7c5f080 100644 --- a/test/test_Utility.cpp +++ b/test/test_Utility.cpp @@ -19,10 +19,9 @@ #define RAD_ENABLE_NOTHROW_DTOR_ASSERTIONS 0 #define RAD_ENABLE_NOTHROW_MOVE_ASSERTIONS 0 -#include "gtest/gtest.h" - #include "radiant/Utility.h" +#include "gtest/gtest.h" #include "test/TestMove.h" RAD_S_ASSERT(noexcept(rad::Forward(rad::DeclVal()))); diff --git a/test/test_Vector.cpp b/test/test_Vector.cpp index 97b84a3..a47c6ab 100644 --- a/test/test_Vector.cpp +++ b/test/test_Vector.cpp @@ -20,7 +20,6 @@ #define RAD_ENABLE_NOTHROW_MOVE_ASSERTIONS 0 #include "gtest/gtest.h" - #include "test/TestAlloc.h" #define RAD_DEFAULT_ALLOCATOR radtest::Allocator diff --git a/tools/rad/pyproject.toml b/tools/rad/pyproject.toml index d6f34db..11faf13 100644 --- a/tools/rad/pyproject.toml +++ b/tools/rad/pyproject.toml @@ -7,6 +7,7 @@ authors = [{ name = "Radiant Authors" }] dependencies = [ "pre-commit", "clang-format", + "clang-tidy", "pylint", "black", "lcov_cobertura", diff --git a/tools/rad/rad/bazel.py b/tools/rad/rad/bazel.py index e935058..6b6cac6 100644 --- a/tools/rad/rad/bazel.py +++ b/tools/rad/rad/bazel.py @@ -107,13 +107,13 @@ def test(label, args=None): return res -def generate_compile_commands(): - """Generates compile_commands.json using bazel.""" - logging.info("generating compile_commands.json") - res = BAZEL.run(["run", "@hedron_compile_commands//:refresh_all"]) +def refresh_compile_commands(): + """Refreshes compile_commands.json using bazel.""" + logging.info("refreshing compile_commands.json") + res = BAZEL.run(["run", ":refresh_compile_commands"]) logging.log( logging.INFO if res else logging.ERROR, - "compile_commands.json generated" if res else "compile_commands.json failed!", + "compile_commands.json refreshed" if res else "compile_commands.json failed!", ) return res diff --git a/tools/rad/rad/clang_tidy.py b/tools/rad/rad/clang_tidy.py new file mode 100644 index 0000000..92c03f8 --- /dev/null +++ b/tools/rad/rad/clang_tidy.py @@ -0,0 +1,87 @@ +#! /usr/bin/env python +# +# Copyright 2023 The Radiant Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Bootstrap for clang-tidy.""" + +import os +import sys +import subprocess +from concurrent.futures import ThreadPoolExecutor, as_completed +import rad.repo +import rad.bazel + +CHUNK_SIZE = 3 +TEST_DIR = rad.repo.ROOT_PATH / "test" + + +def _run_clang_tidy(args, files) -> bool: + args = args + ["--extra-arg=-v"] + proc = subprocess.run( + ["clang-tidy", *args, *files], + capture_output=True, + text=True, + check=False, + cwd=rad.repo.ROOT_PATH, + ) + if proc.stderr == "Error: no checks enabled.\n": + return True + # print(" ".join(str(a) for a in proc.args)) + # print(proc.stderr) + print(proc.stdout) + return proc.returncode == 0 + + +def bootstrap(args) -> bool: + """Bootstraps clang-tidy.""" + + if os.name == "nt": + # Hack for Windows. Poor interplay with Bazel, compile_commands.json, + # and clang-tidy causes issues with header files. We need to add the + # current directory to the include path and disable some warnings. This + # is a temporary workaround until we can find a better solution. + clang_tidy_args = [ + "--extra-arg=-I.", + "--extra-arg=-Wno-pragma-once-outside-header", + "--extra-arg=-Wno-builtin-macro-redefined", + ] + elif sys.platform == "darwin": + # There are some unfortunate side effects with Bazel compile command + # generation on macOS. I've tired a few hacks without success. Here is + # some related reading on work on the topic: + # + # https://stackoverflow.com/questions/58466701/clang-tidy-cant-locate-stdlib-headers + # https://github.com/llvm/llvm-project/issues/52606 + # https://github.com/hedronvision/bazel-compile-commands-extractor/issues/52 + # https://github.com/erenon/bazel_clang_tidy/issues/20 + # + # N.B. We do not raise or return false here since this is used by the + # pre-commit hook and we do not want to block commits on macOS. + print("macOS is not yet supported") + return True + else: + clang_tidy_args = [] + + result = True + args_chunks = [args[i : i + CHUNK_SIZE] for i in range(0, len(args), CHUNK_SIZE)] + with ThreadPoolExecutor() as executor: + futures = [ + executor.submit(_run_clang_tidy, clang_tidy_args, chunk) + for chunk in args_chunks + ] + for future in as_completed(futures): + if future.result() is False: + result = False + return result diff --git a/tools/rad/rad/cli.py b/tools/rad/rad/cli.py index e426e2d..9837bd0 100644 --- a/tools/rad/rad/cli.py +++ b/tools/rad/rad/cli.py @@ -24,6 +24,7 @@ import sys import rad.bazel +import rad.clang_tidy import rad.coverage import rad.intellisense import rad.repo @@ -42,8 +43,8 @@ def init(args) -> bool: # pylint: disable=unused-argument ], check=False, ) - logging.info("Generating compile_commands.json...") - rad.bazel.generate_compile_commands() + logging.info("Refreshing compile_commands.json...") + rad.bazel.refresh_compile_commands() # Disabled in favor of compile_commands.json. Required admin on Windows to # initialize. Might be removed completely in the future. # logging.info("Setting up vscode c_cpp_properties.json...") @@ -139,6 +140,8 @@ def coverage(args) -> bool: def lint(args) -> bool: """Runs lint checks for the Radiant project.""" + if args.clang_tidy is not None: + return rad.clang_tidy.bootstrap(args.clang_tidy) env = os.environ.copy() if args.skip: env["SKIP"] = args.skip @@ -324,6 +327,11 @@ def main() -> int: required=False, help="skip the specified lint checks", ) + lint_parser.add_argument( + "--clang-tidy", + nargs="*", + help="bootstraps clang-tidy with the given arguments", + ) lint_parser.set_defaults(func=lint) # parse arguments...