author     robertphillips@google.com <robertphillips@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81>  2013-12-12 14:24:20 +0000
committer  robertphillips@google.com <robertphillips@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81>  2013-12-12 14:24:20 +0000
commit     2ec3b5919b94276d3c95ac7b1d73bbbd661499cd (patch)
tree       2eeaf4c1143b3c955cbbd973ae27106453fda7bb
parent     f8792e9d0192f3cf6f51335c1d29bdaf581b401f (diff)
download   src-2ec3b5919b94276d3c95ac7b1d73bbbd661499cd.tar.gz
Reverting r12635 (Make leak counters thread-safe - https://codereview.chromium.org/99483003) due to compile errors on Mac 10.6 & in Chrome
git-svn-id: http://skia.googlecode.com/svn/trunk/src@12637 2bbb7eff-a529-9590-31e7-b0007b416f81
-rw-r--r--  core/SkOnce.h  155
1 file changed, 155 insertions, 0 deletions
diff --git a/core/SkOnce.h b/core/SkOnce.h
new file mode 100644
index 00000000..89de1124
--- /dev/null
+++ b/core/SkOnce.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOnce_DEFINED
+#define SkOnce_DEFINED
+
+// SkOnce.h defines SK_DECLARE_STATIC_ONCE and SkOnce(), which you can use
+// together to create a threadsafe way to call a function just once. This
+// is particularly useful for lazy singleton initialization. E.g.
+//
+// static void set_up_my_singleton(Singleton** singleton) {
+// *singleton = new Singleton(...);
+// }
+// ...
+// const Singleton& GetSingleton() {
+// static Singleton* singleton = NULL;
+// SK_DECLARE_STATIC_ONCE(once);
+// SkOnce(&once, set_up_my_singleton, &singleton);
+// SkASSERT(NULL != singleton);
+// return *singleton;
+// }
+//
+// OnceTest.cpp also serves as a few more simple examples.
+
+#include "SkThread.h"
+#include "SkTypes.h"
+
+#ifdef SK_USE_POSIX_THREADS
+# define SK_ONCE_INIT { false, { PTHREAD_MUTEX_INITIALIZER } }
+#else
+# define SK_ONCE_INIT { false, SkBaseMutex() }
+#endif
+
+#define SK_DECLARE_STATIC_ONCE(name) static SkOnceFlag name = SK_ONCE_INIT
+
+struct SkOnceFlag; // If manually created, initialize with SkOnceFlag once = SK_ONCE_INIT
+
+template <typename Func, typename Arg>
+inline void SkOnce(SkOnceFlag* once, Func f, Arg arg);
+
+// ---------------------- Implementation details below here. -----------------------------
+
+struct SkOnceFlag {
+ bool done;
+ SkBaseMutex mutex;
+};
+
+// TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands.
+
+#ifdef SK_BUILD_FOR_WIN
+#include <intrin.h>
+inline static void compiler_barrier() {
+ _ReadWriteBarrier();
+}
+#else
+inline static void compiler_barrier() {
+ asm volatile("" : : : "memory");
+}
+#endif
+
+inline static void full_barrier_on_arm() {
+#ifdef SK_CPU_ARM
+#if SK_ARM_ARCH >= 7
+ asm volatile("dmb" : : : "memory");
+#else
+ asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
+#endif
+#endif
+}
+
+// On every platform, we issue a compiler barrier to prevent it from reordering
+// code. That's enough for platforms like x86 where release and acquire
+// barriers are no-ops. On other platforms we may need to be more careful;
+// ARM, in particular, needs real code for both acquire and release. We use a
+// full barrier, which acts as both, because that's the finest precision ARM
+// provides.
+
+inline static void release_barrier() {
+ compiler_barrier();
+ full_barrier_on_arm();
+}
+
+inline static void acquire_barrier() {
+ compiler_barrier();
+ full_barrier_on_arm();
+}
+
+// We've pulled a pretty standard double-checked locking implementation apart
+// into its main fast path and a slow path that's called when we suspect the
+// one-time code hasn't run yet.
+
+// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
+// This should be rarely called, so we separate it from SkOnce and don't mark it as inline.
+// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
+template <typename Func, typename Arg>
+static void sk_once_slow(SkOnceFlag* once, Func f, Arg arg) {
+ const SkAutoMutexAcquire lock(once->mutex);
+ if (!once->done) {
+ f(arg);
+ // Also known as a store-store/load-store barrier, this makes sure that the writes
+ // done before here---in particular, those done by calling f(arg)---are observable
+        // before the writes after the line, once->done = true.
+ //
+ // In version control terms this is like saying, "check in the work up
+ // to and including f(arg), then check in *done=true as a subsequent change".
+ //
+ // We'll use this in the fast path to make sure f(arg)'s effects are
+        // observable whenever we observe once->done == true.
+ release_barrier();
+ once->done = true;
+ }
+}
+
+// We nabbed this code from the dynamic_annotations library, and in their honor
+// we check the same define. If you find yourself wanting more than just
+// ANNOTATE_BENIGN_RACE, it might make sense to pull that in as a dependency
+// rather than continue to reproduce it here.
+
+#if DYNAMIC_ANNOTATIONS_ENABLED
+// TSAN provides this hook to suppress a known-safe apparent race.
+extern "C" {
+void AnnotateBenignRace(const char* file, int line, const volatile void* mem, const char* desc);
+}
+#define ANNOTATE_BENIGN_RACE(mem, desc) AnnotateBenignRace(__FILE__, __LINE__, mem, desc)
+#else
+#define ANNOTATE_BENIGN_RACE(mem, desc)
+#endif
+
+// This is our fast path, called all the time. We do really want it to be inlined.
+template <typename Func, typename Arg>
+inline void SkOnce(SkOnceFlag* once, Func f, Arg arg) {
+ ANNOTATE_BENIGN_RACE(&(once->done), "Don't worry TSAN, we're sure this is safe.");
+ if (!once->done) {
+ sk_once_slow(once, f, arg);
+ }
+ // Also known as a load-load/load-store barrier, this acquire barrier makes
+ // sure that anything we read from memory---in particular, memory written by
+ // calling f(arg)---is at least as current as the value we read from once->done.
+ //
+ // In version control terms, this is a lot like saying "sync up to the
+ // commit where we wrote once->done = true".
+ //
+ // The release barrier in sk_once_slow guaranteed that once->done = true
+ // happens after f(arg), so by syncing to once->done = true here we're
+    // forcing ourselves to also wait until the effects of f(arg) are readable.
+ acquire_barrier();
+}
+
+#undef ANNOTATE_BENIGN_RACE
+
+#endif // SkOnce_DEFINED
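
For reference, below is a minimal caller sketch against the SkOnce API added above. It is not part of this change; Table, build_table, GetTable, and gTableOnce are hypothetical names used only for illustration.

    #include "SkOnce.h"

    // Hypothetical lazily-built lookup table, filled in exactly once across all threads.
    struct Table { int squares[256]; };
    static Table gTable;

    static void build_table(Table* table) {
        for (int i = 0; i < 256; ++i) {
            table->squares[i] = i * i;
        }
    }

    const Table& GetTable() {
        // SK_DECLARE_STATIC_ONCE gives the flag static storage and SK_ONCE_INIT state.
        SK_DECLARE_STATIC_ONCE(gTableOnce);
        // Any number of threads may race to this call; build_table(&gTable) runs at most
        // once, and the acquire barrier in SkOnce makes its writes visible before return.
        SkOnce(&gTableOnce, build_table, &gTable);
        return gTable;
    }

If the flag cannot be declared through the macro, the header notes it can be created manually and initialized with SkOnceFlag once = SK_ONCE_INIT.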