From 1100aabb3130271c3f5a3a6acc0ba08b1542bf4f Mon Sep 17 00:00:00 2001 From: NickStrupat Date: Sat, 10 May 2014 20:53:23 -0400 Subject: [PATCH] Updated README.md --- Aligned.hpp | 8 ++++---- README.md | 11 ++++++++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/Aligned.hpp b/Aligned.hpp index 9a35eae..b78a788 100644 --- a/Aligned.hpp +++ b/Aligned.hpp @@ -20,8 +20,8 @@ class Aligned : AlignedBase { : pValue(ALIGNED_POINTER(T, bytes, SizeOfTPaddedToAlignment)) { *pValue = T(value); } ~Aligned() { pValue->~T(); } - T & Get() { return *pValue; } - T const & Get() const { return *pValue; } + T & Ref() { return *pValue; } + T const & Ref() const { return *pValue; } }; template @@ -35,8 +35,8 @@ class Aligned : AlignedBase { pValue(ALIGNED_POINTER(T, pBytes.get(), sizeOfTPaddedToAlignment)) { *pValue = T(value); } ~Aligned() { pValue->~T(); } - T & Get() { return *pValue; } - T const & Get() const { return *pValue; } + T & Ref() { return *pValue; } + T const & Ref() const { return *pValue; } }; template diff --git a/README.md b/README.md index ddb6514..9252403 100644 --- a/README.md +++ b/README.md @@ -5,15 +5,15 @@ Memory alignment wrappers, useful for avoiding false sharing ## Usage -Aligned gives you access to a stack allocated, uninitialized instance of T, which is aligned to a 64-byte boundary in memory (a memory address which is a multiple of 64), and padded out at least to the next 64-byte boundary. This effetively gives you a container to hold an object which you need to be on a boundary. +Aligned gives you access to a stack allocated, initialized instance of T, which is aligned to a 64-byte boundary in memory (a memory address which is a multiple of 64), and padded out at least to the next 64-byte boundary. This effectively gives you a container to hold an object which you need to be on a boundary. This is useful if you need to avoid false sharing in a concurrent system. 
For example, if your cache line size is 64 bytes, like most modern processors... Aligned, 64> alignedCount = 0; -...will provide you with easy access to an `std::atomic` instance which won't be subject to false sharing, since `alignedCount.reference()` returns a reference to a `std::atomic` instance which lies alone in memory which is aligned and sized correctly to fit in the 64-byte cache line. +...will provide you with easy access to an `std::atomic` instance which won't be subject to false sharing, since `alignedCount.Ref()` returns a reference to a `std::atomic` instance which lies alone in memory which is aligned and sized correctly to fit in the 64-byte cache line. -You can use the `std::atomic` as you'd expect with `++count.reference();`, for example. +You can use the `std::atomic` as you'd expect with `++count.Ref();`, for example. `T`s with `Alignment` unknown at compile time (are allocated on the heap, of course, and) are supported with @@ -31,3 +31,8 @@ Heap allocated arrays with `Alignment` unknown at compile time are supported wit Heap allocated arrays with size and alignment unkown at compile time are supported with Aligned alignedFoos(12, cacheLineSize); + +If you don't know the target machine's cache line size and don't want to guess that it's the usual 64 bytes, you can use the two CacheAligned classes provided... + + CacheAligned cacheAlignedFoos(12); // Grabs the cache line size at run-time. This works on Windows, Mac OS X, and Linux + CacheAligned cacheAlignedFoo; // initialized using Foo() and the cache line size at run-time \ No newline at end of file