monotone-commits-diffs
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Monotone-commits-diffs] net.venge.monotone.daggy-refinement: d5ac851d3


From: code
Subject: [Monotone-commits-diffs] net.venge.monotone.daggy-refinement: d5ac851d37a4e689d80e2a1c25d2898be20d6520
Date: Wed, 19 Jan 2011 13:58:51 GMT

revision:            d5ac851d37a4e689d80e2a1c25d2898be20d6520
date:                2011-01-18T03:49:15
author:              Timothy Brownawell  <address@hidden>
branch:              net.venge.monotone.daggy-refinement
changelog:
Netsync with policy branches really needs to be able to recurse
down the policy tree as it gets changes at possibly each level.
This requires fast refinement in order to be tolerable.

Start with the data structures needed for daggy cert refinement.

manifest:
format_version "1"

new_manifest [0ad2e89b8a39e117f9e714900fecd5eb035f7092]

old_revision [0adcfd6aa61edc7ce8caf1a80a4daff172dc9bde]

patch "constants.hh"
 from [772e902f840ba1ac694e5d11f7f4befccb13ebbf]
   to [4414102e5c248c7995d11e5d59033631c972a848]

patch "database.cc"
 from [9b6b20b6b342f4294e9e9d6cfd42d228db7ad18f]
   to [a308387ef27e1740025367e5be80da084f172e11]

patch "database.hh"
 from [18a9504000bb0856b693577e3de47861d7a7f5e0]
   to [965437b5f2b984f1c6a940c3771ebc87a5bcee09]

patch "migrate_ancestry.cc"
 from [7e659f3dbdf54efc68479591f24ecaef3db3edd8]
   to [ed9c2cdc677aff728ff3f9ad53b7a7c144ee477d]

patch "migrate_schema.cc"
 from [d6161bf90f49f20afd61716ce2ab56da58567bf4]
   to [7eb8d34881afcc34c0ded54ed38b6737ff40e83b]

patch "migration.hh"
 from [365a4914fd7fd01359aa18df660f4c82d27d3c03]
   to [623fd75b9c469f2eee0591227244c83838aa170a]

patch "schema.sql"
 from [c1f635361a6cae78c93018b4ac3d02546c8dc0c8]
   to [fb6460e0ec0476672fada57fc64bd24e5ea8dccf]
============================================================
--- constants.hh	772e902f840ba1ac694e5d11f7f4befccb13ebbf
+++ constants.hh	4414102e5c248c7995d11e5d59033631c972a848
@@ -38,6 +38,10 @@ namespace constants
   // number of characters in a raw epoch
   std::size_t const epochlen_bytes = idlen_bytes;
 
+  // skipgraph fanout per level
+  // fast netsync refinement requires the same value on both ends
+  std::size_t const skipgraph_fanout = 64;
+
   // default packet size for 'automate stdio'
   std::size_t const default_stdio_packet_size = 32768;
 
============================================================
--- database.cc	9b6b20b6b342f4294e9e9d6cfd42d228db7ad18f
+++ database.cc	a308387ef27e1740025367e5be80da084f172e11
@@ -2831,6 +2831,40 @@ void
 }
 
 void
+database::update_skipgraph_for_rev(revision_id const & rid)
+{
+  // This requires all appropriate ancestors to already be in the skipgraph.
+  // During migrate / cache regen, it needs to be called in
+  // toposort order.
+
+  // This node may not belong in the skipgraph at all (maybe it gets skipped)
+  // If it doesn't, any descendants that may be in the graph may need their
+  // my_cert_summary (nearest descendants at each level) or old_cert_summary
+  // (more distant descendants) updated.
+  // If it does, any descendants may still need to have their old_cert_summary
+  // updated.
+  // There can be descendants, because we may well be updating an arbitrarily
+  // old revision that someone just put a new cert on.
+
+  //   1. Does this revision already exist in the skipgraph?
+  // v    If yes, go to 4.
+  // | 2. Find nearest ancestors that are in the skipgraph (use
+  // |    revision_ancestry). Count intervening ancestors, see if we get an
+  // |    entry.
+  // | 3. If we got an entry, go up a level. Find ancestors at the next level
+  // |    up by walking back at the level of the entry we just added, and see
+  // |    if we repeat for the next higher level.
+  // L 4. Rehash our certs plus ancestors' (walking revision_ancestry) certs
+  //      until (not including) our nearest first-level skip parents. If those
+  //      don't match our stored first-level my_cert_summary, then update it
+  //      and all descendants' first-level old_cert_summary's. For each level
+  //      higher, repeat for ourselves (if we're at that level), or all our
+  //      nearest descendants that are at that level.
+
+  // FIXME: populate this
+}
+
+void
 database::get_revision(revision_id const & id,
                        revision_t & rev)
 {
@@ -3080,7 +3114,7 @@ database::put_revision(revision_id const
   put_height_for_revision(new_id, rev);
 
   // Finally, commit.
-
+  update_skipgraph_for_rev(new_id);
   guard.commit();
   return true;
 }
@@ -3217,6 +3251,13 @@ database::delete_existing_file_sizes()
   imp->execute(query("DELETE FROM file_sizes"));
 }
 
+void
+database::delete_skipgraph()
+{
+  imp->execute(query("DELETE FROM skip_data"));
+  imp->execute(query("DELETE FROM skip_graph"));
+}
+
 /// Deletes one revision from the local database.
 /// @see kill_rev_locally
 void
@@ -3815,6 +3856,7 @@ database::put_revision_cert(cert const &
       record_as_branch_leaf(cert.value, cert.ident);
     }
 
+  update_skipgraph_for_rev(cert.ident);
   imp->cert_stamper.note_change();
   return true;
 }
============================================================
--- database.hh	18a9504000bb0856b693577e3de47861d7a7f5e0
+++ database.hh	965437b5f2b984f1c6a940c3771ebc87a5bcee09
@@ -195,6 +195,9 @@ public:
   bool is_a_ancestor_of_b(revision_id const & ancestor,
                           revision_id const & child);
 
+  // exposed for cache rebuild
+  void update_skipgraph_for_rev(revision_id const & rid);
+
   void get_revision_ids(std::set<revision_id> & ids);
   // this is exposed for 'db check':
   void get_file_ids(std::set<file_id> & ids);
@@ -493,6 +496,9 @@ public:
   void delete_existing_file_sizes();
   void put_file_sizes_for_revision(revision_t const & rev);
 
+  // for regenerate_skipgraph
+  void delete_skipgraph();
+
 private:
   static database_cache dbcache;
 
============================================================
--- schema.sql	c1f635361a6cae78c93018b4ac3d02546c8dc0c8
+++ schema.sql	fb6460e0ec0476672fada57fc64bd24e5ea8dccf
@@ -58,6 +58,24 @@ CREATE INDEX revision_ancestry__child ON
 
 CREATE INDEX revision_ancestry__child ON revision_ancestry (child);
 
+CREATE TABLE skip_data
+       (
+       id,				-- matches revisions.id
+       level int,			-- matches skip_graph.level
+       my_cert_summary not null,	-- certs not covered by any parents
+       old_cert_summary not null,	-- from parents' my_ and old_ hashes
+       primary key (id, level)
+       );
+
+CREATE TABLE skip_graph
+       (
+       parent not null,		-- joins with skip_data.id
+       child not null,		-- joins with skip_data.id
+       level int not null	-- skip-graph level
+       );
+
+CREATE UNIQUE INDEX skip_graph_go_backwards ON skip_graph (child, level, parent);
+
 CREATE TABLE heights
 	(
 	revision not null,	-- joins with revisions.id
============================================================
--- migrate_schema.cc	d6161bf90f49f20afd61716ce2ab56da58567bf4
+++ migrate_schema.cc	7eb8d34881afcc34c0ded54ed38b6737ff40e83b
@@ -875,6 +875,23 @@ char const migrate_add_file_sizes[] =
     "        size not null       -- the size of the file in byte\n"
     "        );";
 
+char const migrate_add_skipgraph[] =
+  "CREATE TABLE skip_data\n"
+  "       (\n"
+  "       id,				-- matches revisions.id\n"
+  "       level int,			-- matches skip_graph.level\n"
+  "       my_cert_summary not null,	-- certs not covered by any parents\n"
+  "       old_cert_summary not null,	-- from parents' my_ and old_ hashes\n"
+  "       primary key (id, level)\n"
+  "       );\n"
+  "CREATE TABLE skip_graph\n"
+  "       (\n"
+  "       parent not null,		-- joins with skip_data.id\n"
+  "       child not null,		-- joins with skip_data.id\n"
+  "       level int not null	-- skip-graph level\n"
+  "       );\n"
+  "CREATE UNIQUE INDEX skip_graph_go_backwards ON skip_graph (child, level, parent);"
+  ;
 
 // these must be listed in order so that ones listed earlier override ones
 // listed later
@@ -970,9 +987,12 @@ const migration_event migration_events[]
   { "1f60cec1b0f6c8c095dc6d0ffeff2bd0af971ce1",
     migrate_better_cert_indexing, 0, upgrade_none, regen_none },
 
+  { "c3a13c60edc432f9a7739f8a260565d77112c86e",
+    migrate_add_skipgraph, 0, upgrade_none, regen_skipgraph },
+
   // The last entry in this table should always be the current
   // schema ID, with 0 for the migrators.
-  { "c3a13c60edc432f9a7739f8a260565d77112c86e", 0, 0, upgrade_none, regen_none }
+  { "10121ea0cab0b1be8ac6c370c7dbf83689bbe094", 0, 0, upgrade_none, regen_none }
 };
 const size_t n_migration_events = (sizeof migration_events
                                    / sizeof migration_events[0]);
============================================================
--- migration.hh	365a4914fd7fd01359aa18df660f4c82d27d3c03
+++ migration.hh	623fd75b9c469f2eee0591227244c83838aa170a
@@ -32,7 +32,8 @@ enum regen_cache_type { regen_none = 0, 
 // value of the "catch all" item "regen_all"
 enum regen_cache_type { regen_none = 0, regen_rosters = 1,
                         regen_heights = 2, regen_branches = 4,
-                        regen_file_sizes = 8, regen_all = 15 };
+                        regen_file_sizes = 8, regen_skipgraph = 16,
+                        regen_all = 31 };
 
 class migration_status {
   regen_cache_type _regen_type;
============================================================
--- migrate_ancestry.cc	7e659f3dbdf54efc68479591f24ecaef3db3edd8
+++ migrate_ancestry.cc	ed9c2cdc677aff728ff3f9ad53b7a7c144ee477d
@@ -1112,7 +1112,50 @@ regenerate_file_sizes(database & db)
   P(F("finished regenerating cached file sizes"));
 }
 
+typedef rev_ancestry_map::const_iterator ancestry_iter;
+typedef pair<ancestry_iter, ancestry_iter> ancestry_iter_pair;
+
+// Find all nearest ancestors in the 'reverse' ancestry map,
+// which are also in the 'next_reverse' ancestry map.
 void
+find_chosen_ancestors(rev_ancestry_map const & reverse,
+                      rev_ancestry_map const & next_reverse,
+                      revision_id const & revid,
+                      set<revision_id> & chosen)
+{
+  for (ancestry_iter_pair parents = reverse.equal_range(revid);
+       parents.first != parents.second; ++parents.first)
+    {
+      revision_id const & this_parent = parents.first->second;
+      if (next_reverse.find(this_parent) != next_reverse.end())
+        find_chosen_ancestors(reverse, next_reverse, this_parent, chosen);
+      else
+        chosen.insert(this_parent);
+    }
+}
+
+void
+regenerate_skipgraph(database & db)
+{
+  db.ensure_open_for_cache_reset();
+  transaction_guard guard(db);
+
+  // clear the skipgraph and the data
+  db.delete_skipgraph();
+
+  // rebuild the skipgraph
+  vector<revision_id> sorted_ids;
+  allrevs_toposorted(db, sorted_ids);
+  for (vector<revision_id>::const_iterator r = sorted_ids.begin();
+       r != sorted_ids.end(); ++r)
+    {
+      db.update_skipgraph_for_rev(*r);
+    }
+
+  guard.commit();
+}
+
+void
 regenerate_caches(database & db, regen_cache_type type)
 {
   I(type != regen_none);
@@ -1125,6 +1168,8 @@ regenerate_caches(database & db, regen_c
     regenerate_branches(db);
   if ((type & regen_file_sizes) == regen_file_sizes)
     regenerate_file_sizes(db);
+  if ((type & regen_skipgraph) == regen_skipgraph)
+    regenerate_skipgraph(db);
 }
 
 // Local Variables:

reply via email to

[Prev in Thread] Current Thread [Next in Thread]