
[Libunwind-devel] [PATCH 06/57] Simplify `sos_alloc()' implementation


From: Tommi Rantala
Subject: [Libunwind-devel] [PATCH 06/57] Simplify `sos_alloc()' implementation
Date: Fri, 21 Sep 2012 14:11:07 +0300

Instead of maintaining a pointer into the `sos_memory' array, maintain an
index of the next free position. When atomic operations are available,
the allocation boils down to a single fetch-and-add operation.
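
For illustration only (this is not libunwind code, and the names below are
made up), the same idea in isolation looks roughly like this: a fixed
buffer plus a free-position index that is advanced atomically, so
concurrent callers never receive overlapping regions.

  #include <assert.h>
  #include <stddef.h>

  #define POOL_SIZE  16384
  #define POOL_ALIGN 16

  static char pool[POOL_SIZE] __attribute__ ((aligned (POOL_ALIGN)));
  static size_t pool_freepos;          /* index of the next free byte */

  static void *
  pool_alloc (size_t size)
  {
    size_t pos;

    /* Round the request up so every returned pointer stays aligned.  */
    size = (size + POOL_ALIGN - 1) & ~(size_t) (POOL_ALIGN - 1);

    /* The whole allocation is one atomic fetch-and-add: the value read
       back is this caller's offset, and the index already points past
       it for the next caller.  */
    pos = __sync_fetch_and_add (&pool_freepos, size);

    assert (pos + size <= POOL_SIZE);
    return &pool[pos];
  }

When no fetch-and-add primitive is available, the patch instead protects
the same two steps (read the index, bump it) with a lock, which is what
the #else branch in the diff below does.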
---
 include/libunwind_i.h |    6 ++++--
 src/mi/flush_cache.c  |    2 +-
 src/mi/mempool.c      |   50 ++++++++++++++++++++++++-------------------------
 3 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/include/libunwind_i.h b/include/libunwind_i.h
index b1f9cee..15a7973 100644
--- a/include/libunwind_i.h
+++ b/include/libunwind_i.h
@@ -110,11 +110,12 @@ cmpxchg_ptr (void *addr, void *old, void *new)
   return AO_compare_and_swap(u.aop, (AO_t) old, (AO_t) new);
 }
 # define fetch_and_add1(_ptr)          AO_fetch_and_add1(_ptr)
+# define fetch_and_add(_ptr, value)    AO_fetch_and_add(_ptr, value)
    /* GCC 3.2.0 on HP-UX crashes on cmpxchg_ptr() */
 #  if !(defined(__hpux) && __GNUC__ == 3 && __GNUC_MINOR__ == 2)
 #   define HAVE_CMPXCHG
 #  endif
-# define HAVE_FETCH_AND_ADD1
+# define HAVE_FETCH_AND_ADD
 #else
 # ifdef HAVE_IA64INTRIN_H
 #  include <ia64intrin.h>
@@ -132,8 +133,9 @@ cmpxchg_ptr (void *addr, void *old, void *new)
   return __sync_bool_compare_and_swap(u.vlp, (long) old, (long) new);
 }
 #  define fetch_and_add1(_ptr)         __sync_fetch_and_add(_ptr, 1)
+#  define fetch_and_add(_ptr, value)   __sync_fetch_and_add(_ptr, value)
 #  define HAVE_CMPXCHG
-#  define HAVE_FETCH_AND_ADD1
+#  define HAVE_FETCH_AND_ADD
 # endif
 #endif
 #define atomic_read(ptr)       (*(ptr))
diff --git a/src/mi/flush_cache.c b/src/mi/flush_cache.c
index c5650ba..2e88fa8 100644
--- a/src/mi/flush_cache.c
+++ b/src/mi/flush_cache.c
@@ -50,7 +50,7 @@ unw_flush_cache (unw_addr_space_t as, unw_word_t lo, unw_word_t hi)
      unw_flush_cache() is allowed to flush more than the requested
      range. */
 
-#ifdef HAVE_FETCH_AND_ADD1
+#ifdef HAVE_FETCH_AND_ADD
   fetch_and_add1 (&as->cache_generation);
 #else
 # warning unw_flush_cache(): need a way to atomically increment an integer.
diff --git a/src/mi/mempool.c b/src/mi/mempool.c
index b04aa63..5e42219 100644
--- a/src/mi/mempool.c
+++ b/src/mi/mempool.c
@@ -40,48 +40,48 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
 #endif
 
 static char sos_memory[SOS_MEMORY_SIZE] ALIGNED(MAX_ALIGN);
-static char *sos_memp;
+static size_t sos_memory_freepos;
 static size_t pg_size;
 
 HIDDEN void *
 sos_alloc (size_t size)
 {
-  char *mem;
-
-#ifdef HAVE_CMPXCHG
-  char *old_mem;
+  size_t pos;
 
   size = UNW_ALIGN(size, MAX_ALIGN);
-  if (!sos_memp)
-    cmpxchg_ptr (&sos_memp, 0, sos_memory);
-  do
-    {
-      old_mem = sos_memp;
 
-      mem = (char *) UNW_ALIGN((unsigned long) old_mem, MAX_ALIGN);
-      mem += size;
-      assert (mem < sos_memory + sizeof (sos_memory));
-    }
-  while (!cmpxchg_ptr (&sos_memp, old_mem, mem));
+#if defined(__GNUC__)
+  /* Assume `sos_memory' is suitably aligned. */
+  assert(((uintptr_t) &sos_memory[0] & (MAX_ALIGN-1)) == 0);
+#endif
+
+#if defined(__GNUC__) && defined(HAVE_FETCH_AND_ADD)
+  pos = fetch_and_add (&sos_memory_freepos, size);
 #else
   static define_lock (sos_lock);
   intrmask_t saved_mask;
 
-  size = UNW_ALIGN(size, MAX_ALIGN);
-
   lock_acquire (&sos_lock, saved_mask);
   {
-    if (!sos_memp)
-      sos_memp = sos_memory;
-
-    mem = (char *) UNW_ALIGN((unsigned long) sos_memp, MAX_ALIGN);
-    mem += size;
-    assert (mem < sos_memory + sizeof (sos_memory));
-    sos_memp = mem;
+# ifndef __GNUC__
+    /* No assumptions about `sos_memory' alignment. */
+    if (sos_memory_freepos == 0)
+      {
+       unsigned align = UNW_ALIGN((uintptr_t) &sos_memory[0], MAX_ALIGN)
+                               - (uintptr_t) &sos_memory[0];
+       sos_memory_freepos = align;
+      }
+# endif
+    pos = sos_memory_freepos;
+    sos_memory_freepos += size;
   }
   lock_release (&sos_lock, saved_mask);
 #endif
-  return mem;
+
+  assert (((uintptr_t) &sos_memory[pos] & (MAX_ALIGN-1)) == 0);
+  assert ((pos+size) <= SOS_MEMORY_SIZE);
+
+  return &sos_memory[pos];
 }
 
 /* Must be called while holding the mempool lock. */
-- 
1.7.9.5



