bug#28590: [PATCH 0/7] Attempt to reduce memory growth


From: Ludovic Courtès
Subject: bug#28590: [PATCH 0/7] Attempt to reduce memory growth
Date: Wed, 04 Oct 2017 15:09:17 +0200
User-agent: Gnus/5.13 (Gnus v5.13) Emacs/25.3 (gnu/linux)

Ludovic Courtès <address@hidden> skribis:

> I’ll try to combine that with incremental marking of the weak table, but
> I’m not very hopeful.

Here’s an attempt to mark tables incrementally.
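
For context, the mark procedures touched below hook into libgc's
custom-kind machinery.  Setup and allocation look roughly like this (a
sketch, not a verbatim quote of weak-table.c; the helper names are
illustrative and header paths may differ depending on how libgc is
installed):

  #include <gc/gc.h>
  #include <gc/gc_mark.h>

  /* Forward declaration; the definition is the 'mark_weak_key_table'
     in the patch below.  */
  static struct GC_ms_entry *
  mark_weak_key_table (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
                       struct GC_ms_entry *mark_stack_limit, GC_word env);

  static int weak_key_gc_kind;

  static void
  init_weak_key_kind (void)
  {
    /* Create a GC "kind" whose objects are traced by our own mark
       procedure instead of being scanned conservatively.  */
    weak_key_gc_kind =
      GC_new_kind (GC_new_free_list (),
                   GC_MAKE_PROC (GC_new_proc (mark_weak_key_table), 0),
                   0, 0);
  }

  static void *
  allocate_weak_key_block (size_t bytes)
  {
    /* Blocks of that kind are traced by 'mark_weak_key_table'; this is
       what 'allocate_entries' boils down to for weak-key tables.  */
    return GC_generic_malloc (bytes, weak_key_gc_kind);
  }

The point to keep in mind is that libgc may call the mark procedure
more than once for the same block, each time with the block's address,
notably when the block is pushed back on the mark stack, which is what
the patch exploits to mark incrementally.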

Unfortunately it sometimes misses references (e.g., the values of a
weak-key table do not get marked even though they should).

I’m unsure whether incremental marking is actually doable: the mark
procedure only knows its own part of the mark state, not the global mark
state.  For instance, it cannot distinguish between different mark
phases.  I tried to fix that by recording the ‘GC_mark_no’ value that
was current when we left the mark procedure, but that didn’t help.
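
For reference, the ‘GC_mark_no’ variant looked roughly like this (a
sketch, not the exact code I tried):

  /* Extra field in 'struct weak_mark_state' recording the mark cycle
     in which we last made progress.  */
  struct weak_mark_state
  {
    unsigned long magic;            /* distinguished magic number */
    unsigned long index;            /* index where to resume marking */
    unsigned long count;            /* number of entries in the array */
    scm_t_weak_entry *entries;      /* pointer to the array of entries */
    GC_word mark_no;                /* 'GC_mark_no' at the last partial pass */
  };

  /* Called at the top of 'mark_weak_key_table' and
     'mark_weak_value_table'.  */
  static void
  maybe_restart_marking (struct weak_mark_state *state)
  {
    if (state->mark_no != GC_mark_no)
      {
        /* A new mark phase has started since we last ran, so the saved
           index refers to the previous cycle; start over.  */
        state->index = 0;
        state->mark_no = GC_mark_no;
      }
  }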

Ideas?

Ludo’.

diff --git a/libguile/weak-table.c b/libguile/weak-table.c
index 1a99a130f..cec9ee8ae 100644
--- a/libguile/weak-table.c
+++ b/libguile/weak-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011, 2012, 2013, 2014 Free Software Foundation, Inc.
+/* Copyright (C) 2011, 2012, 2013, 2014, 2017 Free Software Foundation, Inc.
  * 
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public License
@@ -94,6 +94,15 @@ typedef struct {
   scm_t_bits value;
 } scm_t_weak_entry;
 
+#define WEAK_MAGIC  0x9876123450UL
+
+struct weak_mark_state
+{
+  unsigned long magic;            /* distinguished magic number */
+  unsigned long index;            /* index where to resume marking */
+  unsigned long count;            /* number of entries in the array */
+  scm_t_weak_entry *entries;      /* pointer to the array of entries */
+};
 
 struct weak_entry_data {
   scm_t_weak_entry *in;
@@ -252,6 +261,7 @@ typedef struct {
   unsigned long upper;         /* when to grow */
   int size_index;              /* index into hashtable_size */
   int min_size_index;          /* minimum size_index */
+  struct weak_mark_state *mark_state;
 } scm_t_weak_table;
 
 
@@ -288,7 +298,11 @@ rob_from_rich (scm_t_weak_table *table, unsigned long k)
 
   /* If we are to free up slot K in the table, we need room to do so.  */
   assert (table->n_items < size);
-  
+
+  /* To be on the safe side, have the next mark phase start from the
+     beginning.  */
+  table->mark_state->index = 0;
+
   empty = k;
   do 
     empty = (empty + 1) % size;
@@ -318,6 +332,10 @@ give_to_poor (scm_t_weak_table *table, unsigned long k)
   /* Slot K was just freed up; possibly shuffle others down.  */
   unsigned long size = table->size;
 
+  /* To be on the safe side, have the next mark phase start from the
+     beginning.  */
+  table->mark_state->index = 0;
+
   while (1)
     {
       unsigned long next = (k + 1) % size;
@@ -354,49 +372,96 @@ give_to_poor (scm_t_weak_table *table, unsigned long k)
 static int weak_key_gc_kind;
 static int weak_value_gc_kind;
 
+extern GC_word GC_mark_no;
+
 static struct GC_ms_entry *
 mark_weak_key_table (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
                      struct GC_ms_entry *mark_stack_limit, GC_word env)
 {
-  scm_t_weak_entry *entries = (scm_t_weak_entry*) addr;
-  unsigned long k, size = GC_size (addr) / sizeof (scm_t_weak_entry);
-
-  for (k = 0; k < size; k++)
+  struct weak_mark_state *state = (struct weak_mark_state *) addr;
+  scm_t_weak_entry *entries = state->entries;
+  unsigned long k, size = state->count;
+  unsigned long credit = GC_PROC_BYTES / sizeof (GC_word);
+
+  if (state->magic != WEAK_MAGIC)
+    /* STATE might point to a freed element.  */
+    return mark_stack_ptr;
+
+  for (k = state->index;
+       credit > 0 && k < size;
+       k++)
     if (entries[k].hash && entries[k].key)
       {
         SCM value = SCM_PACK (entries[k].value);
-        mark_stack_ptr = GC_MARK_AND_PUSH ((GC_word*) SCM2PTR (value),
+        mark_stack_ptr = GC_MARK_AND_PUSH ((GC_word *) SCM2PTR (value),
                                            mark_stack_ptr, mark_stack_limit,
                                            NULL);
+       credit--;
       }
 
+  if (k == size)
+    /* We marked ENTRIES till the end.  */
+    state->index = 0;
+  else
+    {
+      /* Save the current index and push ADDR back on the mark stack so
+        we can resume later.  */
+      state->index = k;
+      mark_stack_ptr = GC_MARK_AND_PUSH (addr,
+                                        mark_stack_ptr, mark_stack_limit,
+                                        NULL);
+    }
+
   return mark_stack_ptr;
 }
 
 static struct GC_ms_entry *
 mark_weak_value_table (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
-                       struct GC_ms_entry *mark_stack_limit, GC_word env)
+                      struct GC_ms_entry *mark_stack_limit, GC_word env)
 {
-  scm_t_weak_entry *entries = (scm_t_weak_entry*) addr;
-  unsigned long k, size = GC_size (addr) / sizeof (scm_t_weak_entry);
-
-  for (k = 0; k < size; k++)
+  struct weak_mark_state *state = (struct weak_mark_state *) addr;
+  scm_t_weak_entry *entries = state->entries;
+  unsigned long k, size = state->count;
+  unsigned long credit = GC_PROC_BYTES / sizeof (GC_word);
+
+  if (state->magic != WEAK_MAGIC)
+    /* STATE might point to a freed element.  */
+    return mark_stack_ptr;
+
+  for (k = state->index;
+       credit > 0 && k < size;
+       k++)
     if (entries[k].hash && entries[k].value)
       {
         SCM key = SCM_PACK (entries[k].key);
-        mark_stack_ptr = GC_MARK_AND_PUSH ((GC_word*) SCM2PTR (key),
+        mark_stack_ptr = GC_MARK_AND_PUSH ((GC_word *) SCM2PTR (key),
                                            mark_stack_ptr, mark_stack_limit,
                                            NULL);
+       credit--;
       }
 
+  if (k == size)
+    /* We marked ENTRIES till the end.  */
+    state->index = 0;
+  else
+    {
+      /* Save the current index and push ADDR back on the mark stack so
+        we can resume later.  */
+      state->index = k;
+      mark_stack_ptr = GC_MARK_AND_PUSH (addr,
+                                        mark_stack_ptr, mark_stack_limit,
+                                        NULL);
+    }
+
   return mark_stack_ptr;
 }
 
-static scm_t_weak_entry *
+
+static struct weak_mark_state *
 allocate_entries (unsigned long size, scm_t_weak_table_kind kind)
 {
-  scm_t_weak_entry *ret;
-  size_t bytes = size * sizeof (*ret);
+  struct weak_mark_state *ret;
+  size_t bytes = size * sizeof (scm_t_weak_entry) + sizeof *ret;
 
   switch (kind)
     {
@@ -414,6 +479,9 @@ allocate_entries (unsigned long size, scm_t_weak_table_kind kind)
     }
 
   memset (ret, 0, bytes);
+  ret->magic = WEAK_MAGIC;
+  ret->count = size;
+  ret->entries = (scm_t_weak_entry *) ((char *) ret + sizeof (*ret));
 
   return ret;
 }
@@ -533,6 +601,7 @@ update_entry_count (scm_t_weak_table *table)
 static void
 resize_table (scm_t_weak_table *table)
 {
+  struct weak_mark_state *mark_state, *old_state;
   scm_t_weak_entry *old_entries, *new_entries;
   int new_size_index;
   unsigned long old_size, new_size, old_k;
@@ -554,11 +623,14 @@ resize_table (scm_t_weak_table *table)
     }
   while (!is_acceptable_size_index (table, new_size_index));
 
-  new_entries = allocate_entries (new_size, table->kind);
+  mark_state = allocate_entries (new_size, table->kind);
+  new_entries = mark_state->entries;
 
   old_entries = table->entries;
   old_size = table->size;
-  
+
+  old_state = table->mark_state;
+  table->mark_state = mark_state;
   table->size_index = new_size_index;
   table->size = new_size;
   if (new_size_index <= table->min_size_index)
@@ -618,6 +690,8 @@ resize_table (scm_t_weak_table *table)
                                    SCM_PACK (copy.key), SCM_PACK (copy.value),
                                    table->kind);
     }
+
+  old_state->magic = 0;
 }
 
 /* Run after GC via do_vacuum_weak_table, this function runs over the
@@ -807,6 +881,9 @@ weak_table_remove_x (scm_t_weak_table *table, unsigned long hash,
   hash = (hash << 1) | 0x1;
   k = hash_to_index (hash, size);
 
+  /* To be on the safe side, mark the whole array of entries again.  */
+  table->mark_state->index = 0;
+
   for (distance = 0; distance < size; distance++, k = (k + 1) % size)
     {
       unsigned long other_hash;
@@ -869,7 +946,8 @@ make_weak_table (unsigned long k, scm_t_weak_table_kind kind)
   n = hashtable_size[i];
 
   table = scm_gc_malloc (sizeof (*table), "weak-table");
-  table->entries = allocate_entries (n, kind);
+  table->mark_state = allocate_entries (n, kind);
+  table->entries = table->mark_state->entries;
   table->kind = kind;
   table->n_items = 0;
   table->size = n;
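
For reference, here is the layout that ‘allocate_entries’ produces
after this patch, and that the mark procedures rely on:

  /* Block returned by 'allocate_entries', and handed to the mark
     procedure as ADDR:

       +------------------------+---------+---------+-----+----------------+
       | struct weak_mark_state | entry 0 | entry 1 | ... | entry size - 1 |
       +------------------------+---------+---------+-----+----------------+
       ^                        ^
       ADDR / 'mark_state'      'mark_state->entries'
                                  == (scm_t_weak_entry *) ((char *) ret + sizeof (*ret))

     'table->entries' points into the same block, so the rest of
     weak-table.c keeps indexing entries directly.  */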
