#
# patch "commands.cc"
#  from [e34d33faa7468ccf66a0b34c841dac39b823642a]
#    to [aa4839cbdb1f7ecc4f8f12f58cf30d4b50f4f565]
#
# patch "diff_patch.cc"
#  from [4b6a40d75971a63ab35a7cfd745d3247aa17e0d9]
#    to [ebe5306eb73976216ea8d862ed6ae2ce0986af88]
#
# patch "diff_patch.hh"
#  from [f499e229bf13e49c06c4c9129a1e48b2b1a7ef65]
#    to [e4e4bf98edbab213f7deab03c90ef306976ebcc8]
#
# patch "merge.cc"
#  from [b0b447f677a2e152f0d9cf15afeee01ee23c88c4]
#    to [dc75799326eea15af4fc9cfd65dc3a82f38613d6]
#
# patch "revision.cc"
#  from [660a3bf2779b462955c45cc154e37639e23eb6a3]
#    to [7ad2ceff75b3b6a27be0cc75501c66514ae896f3]
#
# patch "revision.hh"
#  from [3bd95a5f0eeee3f31b8e6f835046d757b4b63488]
#    to [ee644b839e568dd7cf3302381044f87027591e61]
#
# patch "work.cc"
#  from [5157509316effe608f2939ffda66714edb6b2a71]
#    to [c20dfd83af47411efbbb99caa23ec2cbff77e9e8]
#
# patch "work.hh"
#  from [5690b5fbbff1b09efb707af80ba8b4f84c0288cf]
#    to [ec5f5d73337dd3c6a6d601ca5b3b50303e76ce0c]
#
========================================================================
--- commands.cc	e34d33faa7468ccf66a0b34c841dac39b823642a
+++ commands.cc	aa4839cbdb1f7ecc4f8f12f58cf30d4b50f4f565
@@ -2747,43 +2747,9 @@
 }
 */
 
-CMD(lca, N_("debug"), N_("LEFT RIGHT"), N_("print least common ancestor"), OPT_NONE)
-{
-  if (args.size() != 2)
-    throw usage(name);
-
-  revision_id anc, left, right;
-
-  complete(app, idx(args, 0)(), left);
-  complete(app, idx(args, 1)(), right);
-
-  if (find_least_common_ancestor(left, right, anc, app))
-    std::cout << describe_revision(app, anc) << std::endl;
-  else
-    std::cout << _("no common ancestor found") << std::endl;
-}
-
-
-CMD(lcad, N_("debug"), N_("LEFT RIGHT"), N_("print least common ancestor / dominator"),
-    OPT_NONE)
-{
-  if (args.size() != 2)
-    throw usage(name);
-
-  revision_id anc, left, right;
-
-  complete(app, idx(args, 0)(), left);
-  complete(app, idx(args, 1)(), right);
-
-  if (find_common_ancestor_for_merge(left, right, anc, app))
-    std::cout << describe_revision(app, anc) << std::endl;
-  else
-    std::cout << _("no common ancestor/dominator found") << std::endl;
-}
-
-
 /*
 // FIXME_ROSTERS: disabled until rewritten to use rosters
+
 static void
 write_file_targets(change_set const & cs,
                    update_merge_provider & merger,
========================================================================
--- diff_patch.cc	4b6a40d75971a63ab35a7cfd745d3247aa17e0d9
+++ diff_patch.cc	ebe5306eb73976216ea8d862ed6ae2ce0986af88
@@ -13,7 +13,7 @@
 #include "diff_patch.hh"
 #include "interner.hh"
 #include "lcs.hh"
-#include "manifest.hh"
+#include "roster.hh"
 #include "packet.hh"
 #include "sanity.hh"
 #include "transforms.hh"
@@ -457,10 +457,10 @@
 
 merge_provider::merge_provider(app_state & app,
-                               manifest_map const & anc_man,
-                               manifest_map const & left_man,
-                               manifest_map const & right_man)
-  : app(app), anc_man(anc_man), left_man(left_man), right_man(right_man)
+                               roster_t const & anc_ros,
+                               roster_t const & left_ros,
+                               roster_t const & right_ros)
+  : app(app), anc_ros(anc_ros), left_ros(left_ros), right_ros(right_ros)
 {}
 
 void merge_provider::record_merge(file_id const & left_ident,
@@ -489,25 +489,22 @@
 }
 
 std::string merge_provider::get_file_encoding(file_path const & path,
-                                              manifest_map const & man)
+                                              roster_t const & ros)
 {
-  std::string enc;
-  if (get_attribute_from_db(path, encoding_attribute, man, enc, app))
-    return enc;
-  else
-    return default_encoding;
+  attr_value v;
+  if (get_attribute_from_roster(ros, path, encoding_attribute, v))
+    return v();
+  return default_encoding;
 }
 
 bool
 merge_provider::attribute_manual_merge(file_path const & path,
-                                       manifest_map const & man)
+                                       roster_t const & ros)
 {
-  std::string mmf;
-  if (get_attribute_from_db(path, manual_merge_attribute, man, mmf, app))
-    {
-      return mmf == std::string("true");
-    }
-  else
-    return false; // default: enable auto merge
+  attr_value v;
+  if (get_attribute_from_roster(ros, path, manual_merge_attribute, v)
+      && v() == "true")
+    return true;
+  return false; // default: enable auto merge
 }
 
 bool merge_provider::try_to_merge_files(file_path const & anc_path,
@@ -546,17 +543,17 @@
       ancestor_unpacked = ancestor_data.inner();
       right_unpacked = right_data.inner();
 
-      if (!attribute_manual_merge(left_path, left_man) &&
-          !attribute_manual_merge(right_path, right_man))
+      if (!attribute_manual_merge(left_path, left_ros) &&
+          !attribute_manual_merge(right_path, right_ros))
         {
           // both files mergeable by monotone internal algorithm, try to merge
           // note: the ancestor is not considered for manual merging. Forcing the
           // user to merge manually just because of an ancestor mistakenly marked
           // manual seems too harsh
           string left_encoding, anc_encoding, right_encoding;
-          left_encoding = this->get_file_encoding(left_path, left_man);
-          anc_encoding = this->get_file_encoding(anc_path, anc_man);
-          right_encoding = this->get_file_encoding(right_path, right_man);
+          left_encoding = this->get_file_encoding(left_path, left_ros);
+          anc_encoding = this->get_file_encoding(anc_path, anc_ros);
+          right_encoding = this->get_file_encoding(right_path, right_ros);
 
           vector<string> left_lines, ancestor_lines, right_lines, merged_lines;
           split_into_lines(left_unpacked(), left_encoding, left_lines);
@@ -680,10 +677,10 @@
 // and we only record the merges in a transient, in-memory table.
 
 update_merge_provider::update_merge_provider(app_state & app,
-                                             manifest_map const & anc_man,
-                                             manifest_map const & left_man,
-                                             manifest_map const & right_man)
-  : merge_provider(app, anc_man, left_man, right_man) {}
+                                             roster_t const & anc_ros,
+                                             roster_t const & left_ros,
+                                             roster_t const & right_ros)
+  : merge_provider(app, anc_ros, left_ros, right_ros) {}
 
 void update_merge_provider::record_merge(file_id const & left_id,
                                          file_id const & right_id,
@@ -719,30 +716,6 @@
     }
 }
 
-std::string update_merge_provider::get_file_encoding(file_path const & path,
-                                                     manifest_map const & man)
-{
-  std::string enc;
-  if (get_attribute_from_working_copy(path, encoding_attribute, enc))
-    return enc;
-  else if (get_attribute_from_db(path, encoding_attribute, man, enc, app))
-    return enc;
-  else
-    return default_encoding;
-}
-
-bool update_merge_provider::attribute_manual_merge(file_path const & path,
-                                                   manifest_map const & man)
-{
-  std::string mmf;
-  if (get_attribute_from_working_copy(path, manual_merge_attribute, mmf))
-    return mmf == std::string("true");
-  else if (get_attribute_from_db(path, manual_merge_attribute, man, mmf, app))
-    return mmf == std::string("true");
-  else
-    return false; // default: enable auto merge
-}
-
 // the remaining part of this file just handles printing out various
 // diff formats for the case where someone wants to *read* a diff
 // rather than apply it.
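The behavioural rule the roster-based hooks above encode is small: a file goes through monotone's internal line merger only if neither the left nor the right version carries a true manual-merge attribute (the manual_merge_attribute constant), and each version's text is split using its encoding attribute, falling back to default_encoding; the ancestor's attributes are deliberately not consulted. The following stand-alone C++ sketch models just that decision. It is illustrative only: toy_attrs, attr_is_true and attr_or_default are stand-ins, not monotone's roster_t / attr_key API, and the literal keys are placeholders.

// Illustrative sketch of the merge-policy decision, with standard-library
// stand-ins for roster attributes (key -> (live?, value), echoing the shape
// of full_attr_map_t).  Not monotone code.
#include <iostream>
#include <map>
#include <string>
#include <utility>

typedef std::map<std::string, std::pair<bool, std::string> > toy_attrs;

static bool attr_is_true(toy_attrs const & a, std::string const & key)
{
  toy_attrs::const_iterator i = a.find(key);
  return i != a.end() && i->second.first && i->second.second == "true";
}

static std::string attr_or_default(toy_attrs const & a, std::string const & key,
                                   std::string const & dflt)
{
  toy_attrs::const_iterator i = a.find(key);
  return (i != a.end() && i->second.first) ? i->second.second : dflt;
}

int main()
{
  toy_attrs left, right;
  left["manual_merge"] = std::make_pair(true, std::string("true"));  // left opts out
  right["encoding"] = std::make_pair(true, std::string("8bit"));

  // As in the patch, only the two sides being merged are consulted;
  // the ancestor's attributes play no part in the decision.
  bool use_line_merger = !attr_is_true(left, "manual_merge")
                         && !attr_is_true(right, "manual_merge");
  std::cout << "internal line merger? " << (use_line_merger ? "yes" : "no") << "\n"
            << "right encoding: " << attr_or_default(right, "encoding", "default") << "\n";
  return 0;
}

With the left side marked manual, the sketch reports that the internal merger is skipped, which is the same outcome try_to_merge_files produces when attribute_manual_merge returns true for either side.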
========================================================================
--- diff_patch.hh	f499e229bf13e49c06c4c9129a1e48b2b1a7ef65
+++ diff_patch.hh	e4e4bf98edbab213f7deab03c90ef306976ebcc8
@@ -14,6 +14,8 @@
 #include 
 #include 
 
+struct roster_t;
+
 struct conflict {};
 
 // this file is to contain some stripped down, in-process implementations
@@ -36,13 +38,13 @@
 struct merge_provider
 {
  app_state & app;
-  manifest_map const & anc_man;
-  manifest_map const & left_man;
-  manifest_map const & right_man;
+  roster_t const & anc_ros;
+  roster_t const & left_ros;
+  roster_t const & right_ros;
  merge_provider(app_state & app,
-                 manifest_map const & anc_man,
-                 manifest_map const & left_man,
-                 manifest_map const & right_man);
+                 roster_t const & anc_ros,
+                 roster_t const & left_ros,
+                 roster_t const & right_ros);
 
  // merge3 on a file (line by line)
  virtual bool try_to_merge_files(file_path const & anc_path,
@@ -73,11 +75,11 @@
                           file_data & dat);
 
  virtual std::string get_file_encoding(file_path const & path,
-                                        manifest_map const & man);
+                                        roster_t const & ros);
 
  virtual bool attribute_manual_merge(file_path const & path,
-                                      manifest_map const & man);
-
+                                      roster_t const & ros);
+
  virtual ~merge_provider() {}
 };
 
@@ -85,9 +87,9 @@
 {
  std::map<file_id, file_data> temporary_store;
  update_merge_provider(app_state & app,
-                        manifest_map const & anc_man,
-                        manifest_map const & left_man,
-                        manifest_map const & right_man);
+                        roster_t const & anc_ros,
+                        roster_t const & left_ros,
+                        roster_t const & right_ros);
 
  virtual void record_merge(file_id const & left_ident,
                            file_id const & right_ident,
@@ -99,12 +101,6 @@
                           file_id const & ident,
                           file_data & dat);
 
-  virtual std::string get_file_encoding(file_path const & path,
-                                        manifest_map const & man);
-
-  virtual bool attribute_manual_merge(file_path const & path,
-                                      manifest_map const & man);
-
  virtual ~update_merge_provider() {}
 };
 
========================================================================
--- merge.cc	b0b447f677a2e152f0d9cf15afeee01ee23c88c4
+++ merge.cc	dc75799326eea15af4fc9cfd65dc3a82f38613d6
@@ -5,13 +5,51 @@
 
 #include 
 
-#include "revision.hh"
-#include "transforms.hh"
+#include 
+
+#include "diff_patch.hh"
 #include "merge.hh"
+#include "packet.hh"
+#include "revision.hh"
 #include "roster_merge.hh"
-#include "packet.hh"
 #include "safe_map.hh"
+#include "transforms.hh"
 
+using std::map;
+using std::make_pair;
+using boost::shared_ptr;
+
+static void
+load_and_cache_roster(revision_id const & rid,
+                      map<revision_id, shared_ptr<roster_t> > & rmap,
+                      shared_ptr<roster_t> & rout,
+                      app_state & app)
+{
+  map<revision_id, shared_ptr<roster_t> >::const_iterator i = rmap.find(rid);
+  if (i != rmap.end())
+    rout = i->second;
+  else
+    {
+      rout = shared_ptr<roster_t>(new roster_t());
+      marking_map mm;
+      app.db.get_roster(rid, *rout, mm);
+      safe_insert(rmap, make_pair(rid, rout));
+    }
+}
+
+static void
+get_file_details(roster_t const & ros, node_id nid,
+                 file_id & fid,
+                 file_path & pth)
+{
+  I(ros.has_node(nid));
+  file_t f = downcast_to_file_t(ros.get_node(nid));
+  fid = f->content;
+  split_path sp;
+  ros.get_name(nid, sp);
+  pth = file_path(sp);
+}
+
 void
 interactive_merge_and_store(revision_id const & left_rid,
                             revision_id const & right_rid,
@@ -35,7 +73,103 @@
 
   roster_t & merged_roster = result.roster;
 
-  // FIXME_ROSTERS: add code here to resolve conflicts
+  // FIXME_ROSTERS: we only have code (below) to invoke the
+  // line-merger on content conflicts.  Other classes of conflict will
+  // cause an invariant to trip below.  Probably just a bunch of lua
+  // hooks for remaining conflict types will be ok.
+
+  if (!result.is_clean())
+    {
+      L(F("unclean mark-merge: %d name conflicts, %d content conflicts, %d attr conflicts\n")
+        % result.node_name_conflicts.size()
+        % result.file_content_conflicts.size()
+        % result.node_attr_conflicts.size());
+
+      for (size_t i = 0; i < result.node_name_conflicts.size(); ++i)
+        L(F("name conflict on node %d: [parent %d, self %s] vs. [parent %d, self %s]\n")
+          % result.node_name_conflicts[i].nid
+          % result.node_name_conflicts[i].left.first
+          % result.node_name_conflicts[i].left.second
+          % result.node_name_conflicts[i].right.first
+          % result.node_name_conflicts[i].right.second);
+
+      for (size_t i = 0; i < result.file_content_conflicts.size(); ++i)
+        L(F("content conflict on node %d: [%s] vs. [%s]\n")
+          % result.file_content_conflicts[i].nid
+          % result.file_content_conflicts[i].left
+          % result.file_content_conflicts[i].right);
+
+      for (size_t i = 0; i < result.node_attr_conflicts.size(); ++i)
+        L(F("attribute conflict on node %d, key %s: [%d, %s] vs. [%d, %s]\n")
+          % result.node_attr_conflicts[i].nid
+          % result.node_attr_conflicts[i].key
+          % result.node_attr_conflicts[i].left.first
+          % result.node_attr_conflicts[i].left.second
+          % result.node_attr_conflicts[i].right.first
+          % result.node_attr_conflicts[i].right.second);
+
+      // Attempt to auto-resolve any content conflicts using the line-merger.
+      // To do this requires finding a merge ancestor.
+      if (!result.file_content_conflicts.empty())
+        {
+
+          L(F("examining content conflicts\n"));
+          std::vector<file_content_conflict> residual_conflicts;
+
+          revision_id lca;
+          map<revision_id, shared_ptr<roster_t> > lca_rosters;
+          find_common_ancestor_for_merge(left_rid, right_rid, lca, app);
+
+          for (size_t i = 0; i < result.file_content_conflicts.size(); ++i)
+            {
+              file_content_conflict const & conflict = result.file_content_conflicts[i];
+
+              // For each file, if the lca is nonzero and its roster
+              // contains the file, then we use its roster.  Otherwise
+              // we use the roster at the file's birth revision, which
+              // is the "per-file worst case" lca.
+              shared_ptr<roster_t> roster_for_file_lca;
+
+              // Begin by loading any non-empty file lca roster
+              if (!lca.inner()().empty())
+                load_and_cache_roster(lca, lca_rosters, roster_for_file_lca, app);
+
+              // If this roster doesn't contain the file, replace it with
+              // the file's birth roster.
+              if (!roster_for_file_lca->has_node(conflict.nid))
+                {
+                  marking_map::const_iterator j = left_marking_map.find(conflict.nid);
+                  I(j != left_marking_map.end());
+                  load_and_cache_roster(j->second.birth_revision, lca_rosters,
+                                        roster_for_file_lca, app);
+                }
+
+              // Now we should certainly have the node.
+              I(roster_for_file_lca->has_node(conflict.nid));
+
+              file_id anc_id, left_id, right_id;
+              file_path anc_path, left_path, right_path;
+              get_file_details (*roster_for_file_lca, conflict.nid, anc_id, anc_path);
+              get_file_details (left_roster, conflict.nid, left_id, left_path);
+              get_file_details (right_roster, conflict.nid, right_id, right_path);
+
+              file_id merged_id;
+
+              merge_provider mp(app, *roster_for_file_lca, left_roster, right_roster);
+              if (mp.try_to_merge_files(anc_path, left_path, right_path, right_path,
+                                        anc_id, left_id, right_id, merged_id))
+                {
+                  L(F("resolved content conflict %d / %d\n")
+                    % (i+1) % result.file_content_conflicts.size());
+                }
+              else
+                residual_conflicts.push_back(conflict);
+            }
+          result.file_content_conflicts = residual_conflicts;
+        }
+    }
+
+  // write new files into the db
 
   I(result.is_clean());
========================================================================
--- revision.cc	660a3bf2779b462955c45cc154e37639e23eb6a3
+++ revision.cc	7ad2ceff75b3b6a27be0cc75501c66514ae896f3
@@ -107,345 +107,128 @@
 }
 
-// calculating least common ancestors is a delicate thing.
-//
-// it turns out that we cannot choose the simple "least common ancestor"
-// for purposes of a merge, because it is possible that there are two
-// equally reachable common ancestors, and this produces ambiguity in the
-// merge. the result -- in a pathological case -- is silently accepting one
-// set of edits while discarding another; not exactly what you want a
-// version control tool to do.
+// For a surprisingly long time, we have been using an algorithm which
+// is nonsense, based on a misunderstanding of what "LCA" means. The
+// LCA of two nodes is *not* the first common ancestor which you find
+// when iteratively expanding their ancestor sets. Instead, the LCA is
+// the common ancestor which is a descendent of all other common
+// ancestors.
 //
-// a conservative approximation is what we'll call a "subgraph recurring"
-// LCA algorithm. this is somewhat like locating the least common dominator
-// node, but not quite. it is actually just a vanilla LCA search, except
-// that any time there's a fork (a historical merge looks like a fork from
-// our perspective, working backwards from children to parents) it reduces
-// the fork to a common parent via a sequence of pairwise recursive calls
-// to itself before proceeding. this will always resolve to a common parent
-// with no ambiguity, unless it falls off the root of the graph.
+// In general, a set of nodes in a DAG doesn't always have an
+// LCA. There might be multiple common ancestors which are not parents
+// of one another. So we implement something which is "functionally
+// useful" for finding a merge point (and moreover, which always
+// terminates): we find an LCA of the input set if it exists,
+// otherwise we replace the input set with the nodes we did find and
+// repeat.
 //
-// unfortunately the subgraph recurring algorithm sometimes goes too far
-// back in history -- for example if there is an unambiguous propagate from
-// one branch to another, the entire subgraph preceeding the propagate on
-// the recipient branch is elided, since it is a merge.
-//
-// our current hypothesis is that the *exact* condition we're looking for,
-// when doing a merge, is the least node which dominates one side of the
-// merge and is an ancestor of the other.
+// All previous discussions in monotone-land, before say August 2005,
+// of LCA (and LCAD) are essentially wrong due to our silly
+// misunderstanding.  It's unfortunate, but our half-baked
+// approximations worked almost well enough to take us through 3 years
+// of deployed use. Hopefully this more accurate new use will serve us
+// even longer.
 
 typedef unsigned long ctx;
 typedef boost::dynamic_bitset<> bitmap;
 typedef boost::shared_ptr<bitmap> shared_bitmap;
 
 static void
-ensure_parents_loaded(ctx child,
-                      std::map<ctx, shared_bitmap> & parents,
-                      interner<ctx> & intern,
-                      app_state & app)
-{
-  if (parents.find(child) != parents.end())
-    return;
+calculate_ancestors_from_graph(interner<ctx> & intern,
+                               revision_id const & init,
+                               std::multimap<revision_id, revision_id> const & graph,
+                               std::map< ctx, shared_bitmap > & ancestors,
+                               shared_bitmap & total_union);
 
-  L(F("loading parents for node %d\n") % child);
-
-  std::set<revision_id> imm_parents;
-  app.db.get_revision_parents(revision_id(intern.lookup(child)), imm_parents);
-
-  // The null revision is not a parent for purposes of finding common
-  // ancestors.
-  for (std::set<revision_id>::iterator p = imm_parents.begin();
-       p != imm_parents.end(); )
-    {
-      if (null_id(*p))
-        imm_parents.erase(p++);
-      else
-        ++p;
-    }
-
-  shared_bitmap bits = shared_bitmap(new bitmap(parents.size()));
-
-  for (std::set<revision_id>::const_iterator p = imm_parents.begin();
-       p != imm_parents.end(); ++p)
-    {
-      ctx pn = intern.intern(p->inner()());
-      L(F("parent %s -> node %d\n") % *p % pn);
-      if (pn >= bits->size())
-        bits->resize(pn+1);
-      bits->set(pn);
-    }
-
-  parents.insert(std::make_pair(child, bits));
-}
-
-static bool
-expand_dominators(std::map<ctx, shared_bitmap> & parents,
-                  std::map<ctx, shared_bitmap> & dominators,
-                  interner<ctx> & intern,
-                  app_state & app)
+void
+find_common_ancestor_for_merge(revision_id const & left,
+                               revision_id const & right,
+                               revision_id & anc,
+                               app_state & app)
 {
-  bool something_changed = false;
-  std::vector<ctx> nodes;
+  interner<ctx> intern;
+  std::set<ctx> leaves;
+  std::map<ctx, shared_bitmap> ancestors;
 
-  nodes.reserve(dominators.size());
+  shared_bitmap isect = shared_bitmap(new bitmap());
+  shared_bitmap isect_ancs = shared_bitmap(new bitmap());
 
-  // pass 1, pull out all the node numbers we're going to scan this time around
-  for (std::map<ctx, shared_bitmap>::reverse_iterator e = dominators.rbegin();
-       e != dominators.rend(); ++e)
-    nodes.push_back(e->first);
-
-  // pass 2, update any of the dominator entries we can
-  for (std::vector<ctx>::const_iterator n = nodes.begin();
-       n != nodes.end(); ++n)
-    {
-      shared_bitmap bits = dominators[*n];
-      bitmap saved(*bits);
-      if (bits->size() <= *n)
-        bits->resize(*n + 1);
-      bits->set(*n);
-
-      ensure_parents_loaded(*n, parents, intern, app);
-      shared_bitmap n_parents = parents[*n];
-
-      bitmap intersection(bits->size());
-
-      bool first = true;
-      for (unsigned long parent = 0;
-           parent != n_parents->size(); ++parent)
-        {
-          if (! n_parents->test(parent))
-            continue;
 
+  leaves.insert(intern.intern(left.inner()()));
+  leaves.insert(intern.intern(right.inner()()));
 
-          if (dominators.find(parent) == dominators.end())
-            dominators.insert(std::make_pair(parent,
-                                             shared_bitmap(new bitmap())));
-          shared_bitmap pbits = dominators[parent];
-          if (intersection.size() > pbits->size())
-            pbits->resize(intersection.size());
+  std::multimap<revision_id, revision_id> inverse_graph;
+  {
+    std::multimap<revision_id, revision_id> graph;
+    app.db.get_revision_ancestry(graph);
+    typedef std::multimap<revision_id, revision_id>::const_iterator gi;
+    for (gi i = graph.begin(); i != graph.end(); ++i)
+      inverse_graph.insert(std::make_pair(i->second, i->first));
+  }
 
-          if (pbits->size() > intersection.size())
-            intersection.resize(pbits->size());
-
-          if (first)
-            {
-              intersection = (*pbits);
-              first = false;
-            }
-          else
-            intersection &= (*pbits);
-        }
-
-      if (intersection.size() > bits->size())
-        bits->resize(intersection.size());
-
-      if (bits->size() > intersection.size())
-        intersection.resize(bits->size());
-      (*bits) |= intersection;
-      if (*bits != saved)
-        something_changed = true;
-    }
-  return something_changed;
-}
-
-static bool
-expand_ancestors(std::map<ctx, shared_bitmap> & parents,
-                 std::map<ctx, shared_bitmap> & ancestors,
-                 interner<ctx> & intern,
-                 app_state & app)
-{
-  bool something_changed = false;
-  std::vector<ctx> nodes;
-
-  nodes.reserve(ancestors.size());
-
-  // pass 1, pull out all the node numbers we're going to scan this time around
-  for (std::map<ctx, shared_bitmap>::reverse_iterator e = ancestors.rbegin();
-       e != ancestors.rend(); ++e)
-    nodes.push_back(e->first);
-  // pass 2, update any of the ancestor entries we can
-  for (std::vector<ctx>::const_iterator n = nodes.begin(); n != nodes.end(); ++n)
+  while (leaves.size() != 1)
     {
-      shared_bitmap bits = ancestors[*n];
-      bitmap saved(*bits);
-      if (bits->size() <= *n)
-        bits->resize(*n + 1);
-      bits->set(*n);
+      isect->clear();
+      isect_ancs->clear();
 
-      ensure_parents_loaded(*n, parents, intern, app);
-      shared_bitmap n_parents = parents[*n];
-      for (ctx parent = 0; parent != n_parents->size(); ++parent)
+      // First intersect all ancestors of current leaf set
+      for (std::set<ctx>::const_iterator i = leaves.begin(); i != leaves.end(); ++i)
         {
-          if (! n_parents->test(parent))
-            continue;
-
-          if (bits->size() <= parent)
-            bits->resize(parent + 1);
-          bits->set(parent);
-
-          if (ancestors.find(parent) == ancestors.end())
-            ancestors.insert(make_pair(parent,
-                                       shared_bitmap(new bitmap())));
-          shared_bitmap pbits = ancestors[parent];
-
-          if (bits->size() > pbits->size())
-            pbits->resize(bits->size());
-
-          if (pbits->size() > bits->size())
-            bits->resize(pbits->size());
-
-          (*bits) |= (*pbits);
-        }
-      if (*bits != saved)
-        something_changed = true;
-    }
-  return something_changed;
-}
-
-static bool
-find_intersecting_node(bitmap & fst,
-                       bitmap & snd,
-                       interner<ctx> const & intern,
-                       revision_id & anc)
-{
-
-  if (fst.size() > snd.size())
-    snd.resize(fst.size());
-  else if (snd.size() > fst.size())
-    fst.resize(snd.size());
-
-  bitmap intersection = fst & snd;
-  if (intersection.any())
-    {
-      L(F("found %d intersecting nodes\n") % intersection.count());
-      for (ctx i = 0; i < intersection.size(); ++i)
-        {
-          if (intersection.test(i))
+          ctx curr_leaf = *i;
+          shared_bitmap curr_leaf_ancestors;
+          std::map<ctx, shared_bitmap>::const_iterator j = ancestors.find(*i);
+          if (j != ancestors.end())
+            curr_leaf_ancestors = j->second;
+          else
            {
-              anc = revision_id(intern.lookup(i));
-              return true;
+              curr_leaf_ancestors = shared_bitmap(new bitmap());
+              calculate_ancestors_from_graph(intern, revision_id(intern.lookup(curr_leaf)),
+                                             inverse_graph, ancestors,
+                                             curr_leaf_ancestors);
            }
-        }
-    }
-  return false;
-}
+          if (isect->size() > curr_leaf_ancestors->size())
+            curr_leaf_ancestors->resize(isect->size());
 
-// static void
-// dump_bitset_map(std::string const & hdr,
-//                 std::map< ctx, shared_bitmap > const & mm)
-// {
-//   L(F("dumping [%s] (%d entries)\n") % hdr % mm.size());
-//   for (std::map< ctx, shared_bitmap >::const_iterator i = mm.begin();
-//        i != mm.end(); ++i)
-//     {
-//       L(F("dump [%s]: %d -> %s\n") % hdr % i->first % (*(i->second)));
-//     }
-// }
+          if (curr_leaf_ancestors->size() > isect->size())
+            isect->resize(curr_leaf_ancestors->size());
 
-bool
-find_common_ancestor_for_merge(revision_id const & left,
-                               revision_id const & right,
-                               revision_id & anc,
-                               app_state & app)
-{
-  // Temporary workaround until we figure out how to clean up the whole
-  // ancestor selection mess:
-  if (app.use_lca)
-    return find_least_common_ancestor(left, right, anc, app);
-
-  interner<ctx> intern;
-  std::map< ctx, shared_bitmap >
-    parents, ancestors, dominators;
-
-  ctx ln = intern.intern(left.inner()());
-  ctx rn = intern.intern(right.inner()());
-
-  shared_bitmap lanc = shared_bitmap(new bitmap());
-  shared_bitmap ranc = shared_bitmap(new bitmap());
-  shared_bitmap ldom = shared_bitmap(new bitmap());
-  shared_bitmap rdom = shared_bitmap(new bitmap());
-
-  ancestors.insert(make_pair(ln, lanc));
-  ancestors.insert(make_pair(rn, ranc));
-  dominators.insert(make_pair(ln, ldom));
-  dominators.insert(make_pair(rn, rdom));
-
-  L(F("searching for common ancestor, left=%s right=%s\n") % left % right);
-
-  while (expand_ancestors(parents, ancestors, intern, app) |
-         expand_dominators(parents, dominators, intern, app))
-    {
-      L(F("common ancestor scan [par=%d,anc=%d,dom=%d]\n") %
-        parents.size() % ancestors.size() % dominators.size());
-
-      if (find_intersecting_node(*lanc, *rdom, intern, anc))
-        {
-          L(F("found node %d, ancestor of left %s and dominating right %s\n")
-            % anc % left % right);
-          return true;
+          if (i == leaves.begin())
+            *isect = *curr_leaf_ancestors;
+          else
+            (*isect) &= (*curr_leaf_ancestors);
        }
-      else if (find_intersecting_node(*ranc, *ldom, intern, anc))
+      // isect is now the set of common ancestors of leaves, but that is not enough.
+      // We need the set of leaves of isect; to do that we calculate the set of
+      // ancestors of isect, in order to subtract it from isect (below).
+      std::set<ctx> new_leaves;
+      for (ctx i = 0; i < isect->size(); ++i)
        {
-          L(F("found node %d, ancestor of right %s and dominating left %s\n")
-            % anc % right % left);
-          return true;
+          if (isect->test(i))
+            {
+              calculate_ancestors_from_graph(intern, revision_id(intern.lookup(i)),
+                                             inverse_graph, ancestors, isect_ancs);
+            }
        }
-    }
 
-//   dump_bitset_map("ancestors", ancestors);
-//   dump_bitset_map("dominators", dominators);
-//   dump_bitset_map("parents", parents);
-  return false;
-}
-
-bool
-find_least_common_ancestor(revision_id const & left,
-                           revision_id const & right,
-                           revision_id & anc,
-                           app_state & app)
-{
-  interner<ctx> intern;
-  std::map< ctx, shared_bitmap >
-    parents, ancestors;
-
-  if (left == right)
-    {
-      anc = left;
-      return true;
-    }
-
-  ctx ln = intern.intern(left.inner()());
-  ctx rn = intern.intern(right.inner()());
-
-  shared_bitmap lanc = shared_bitmap(new bitmap());
-  shared_bitmap ranc = shared_bitmap(new bitmap());
-
-  ancestors.insert(make_pair(ln, lanc));
-  ancestors.insert(make_pair(rn, ranc));
-
-  L(F("searching for least common ancestor, left=%s right=%s\n") % left % right);
-
-  while (expand_ancestors(parents, ancestors, intern, app))
-    {
-      L(F("least common ancestor scan [par=%d,anc=%d]\n") %
-        parents.size() % ancestors.size());
-
-      if (find_intersecting_node(*lanc, *ranc, intern, anc))
+      // Finally, the subtraction step: for any element i of isect, if
+      // it's *not* in isect_ancs, it survives as a new leaf.
+      leaves.clear();
+      for (ctx i = 0; i < isect->size(); ++i)
        {
-          L(F("found node %d, ancestor of left %s and right %s\n")
-            % anc % left % right);
-          return true;
+          if (!isect->test(i))
+            continue;
+          if (i < isect_ancs->size() && isect_ancs->test(i))
+            continue;
+          safe_insert(leaves, i);
        }
    }
-//   dump_bitset_map("ancestors", ancestors);
-//   dump_bitset_map("parents", parents);
-  return false;
+
+  I(leaves.size() == 1);
+  anc = revision_id(intern.lookup(*leaves.begin()));
 }
-
 
 // FIXME: this algorithm is incredibly inefficient; it's O(n) where n is the
 // size of the entire revision graph.
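Stripped of the interner and bitmap machinery, the new find_common_ancestor_for_merge loop does the following: intersect the ancestor sets of the current leaves, keep only those members of the intersection that are not ancestors of another member, and repeat with the survivors until a single node remains. The stand-alone C++ sketch below models that iteration over a toy DAG; it uses std::set/std::multimap instead of interned ids and dynamic_bitsets, all names are illustrative, and ancestor sets are taken to be strict (excluding the node itself), matching the subtraction step's assumption.

// Illustrative model of the iterated merge-ancestor search; not monotone code.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <utility>

typedef std::string node;
typedef std::multimap<node, node> child_to_parents;  // inverse ancestry graph

// All strict ancestors of n, found by walking parent links.
static std::set<node> ancestors_of(node const & n, child_to_parents const & g)
{
  std::set<node> out, frontier;
  frontier.insert(n);
  while (!frontier.empty())
    {
      node cur = *frontier.begin();
      frontier.erase(frontier.begin());
      std::pair<child_to_parents::const_iterator,
                child_to_parents::const_iterator> r = g.equal_range(cur);
      for (child_to_parents::const_iterator i = r.first; i != r.second; ++i)
        if (out.insert(i->second).second)
          frontier.insert(i->second);
    }
  return out;
}

static node merge_ancestor(node left, node right, child_to_parents const & g)
{
  std::set<node> leaves;
  leaves.insert(left);
  leaves.insert(right);
  while (leaves.size() != 1)
    {
      // 1. Intersect the ancestor sets of all current leaves.
      std::set<node> isect;
      bool first = true;
      for (std::set<node>::const_iterator i = leaves.begin(); i != leaves.end(); ++i)
        {
          std::set<node> a = ancestors_of(*i, g);
          if (first) { isect = a; first = false; }
          else
            {
              std::set<node> tmp;
              for (std::set<node>::const_iterator j = isect.begin(); j != isect.end(); ++j)
                if (a.count(*j)) tmp.insert(*j);
              isect.swap(tmp);
            }
        }
      // 2. Keep only the "leaves" of the intersection: members that are not
      //    ancestors of another member.  They become the new working set.
      std::set<node> isect_ancs;
      for (std::set<node>::const_iterator i = isect.begin(); i != isect.end(); ++i)
        {
          std::set<node> a = ancestors_of(*i, g);
          isect_ancs.insert(a.begin(), a.end());
        }
      leaves.clear();
      for (std::set<node>::const_iterator i = isect.begin(); i != isect.end(); ++i)
        if (!isect_ancs.count(*i))
          leaves.insert(*i);
    }
  return *leaves.begin();
}

int main()
{
  // A small criss-cross history: a -> {b, c}, and both b and c -> {d, e}.
  child_to_parents g;
  g.insert(std::make_pair(std::string("b"), std::string("a")));
  g.insert(std::make_pair(std::string("c"), std::string("a")));
  g.insert(std::make_pair(std::string("d"), std::string("b")));
  g.insert(std::make_pair(std::string("d"), std::string("c")));
  g.insert(std::make_pair(std::string("e"), std::string("b")));
  g.insert(std::make_pair(std::string("e"), std::string("c")));
  std::cout << merge_ancestor("d", "e", g) << "\n";
  return 0;
}

On the criss-cross graph in main(), d and e have no unique LCA (b and c are incomparable common ancestors), so the first pass replaces the leaf set {d, e} with {b, c} and the second converges on a — the "functionally useful" merge point the comment block above describes, rather than an arbitrary pick between b and c.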
========================================================================
--- revision.hh	3bd95a5f0eeee3f31b8e6f835046d757b4b63488
+++ revision.hh	ee644b839e568dd7cf3302381044f87027591e61
@@ -132,18 +132,12 @@
 
 // graph walking
 
-bool
+void
 find_common_ancestor_for_merge(revision_id const & left,
                                revision_id const & right,
                                revision_id & anc,
                                app_state & app);
 
-bool
-find_least_common_ancestor(revision_id const & left,
-                           revision_id const & right,
-                           revision_id & anc,
-                           app_state & app);
-
 bool
 is_ancestor(revision_id const & ancestor,
             revision_id const & descendent,
========================================================================
--- work.cc	5157509316effe608f2939ffda66714edb6b2a71
+++ work.cc	c20dfd83af47411efbbb99caa23ec2cbff77e9e8
@@ -721,6 +721,28 @@
   return true;
 }
 
+bool
+get_attribute_from_roster(roster_t const & ros,
+                          file_path const & path,
+                          attr_key const & key,
+                          attr_value & val)
+{
+  split_path sp;
+  path.split(sp);
+  if (ros.has_node(sp))
+    {
+      node_t n = ros.get_node(sp);
+      full_attr_map_t::const_iterator i = n->attrs.find(key);
+      if (i != n->attrs.end() && i->second.first)
+        {
+          val = i->second.second;
+          return true;
+        }
+    }
+  return false;
+}
+
+
 bool get_attribute_from_db(file_path const & file,
                            std::string const & attr_key,
                            manifest_map const & man,
========================================================================
--- work.hh	5690b5fbbff1b09efb707af80ba8b4f84c0288cf
+++ work.hh	ec5f5d73337dd3c6a6d601ca5b3b50303e76ce0c
@@ -192,6 +192,11 @@
 extern std::string const encoding_attribute;
 extern std::string const manual_merge_attribute;
 
+bool get_attribute_from_roster(roster_t const & ros,
+                               file_path const & path,
+                               attr_key const & key,
+                               attr_value & val);
+
 bool get_attribute_from_db(file_path const & file,
                            std::string const & attr_key,
                            manifest_map const & man,
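One small pattern from merge.cc above deserves a call-out: rosters for candidate merge ancestors are read from the database at most once per merge, by memoizing shared_ptr values in a map keyed by revision (load_and_cache_roster), so the per-file fallback to a birth-revision roster can reuse an already loaded merge-LCA roster. The following generic sketch shows the same memoization shape; it uses std::shared_ptr and toy types as stand-ins (the patch itself uses boost::shared_ptr, roster_t, and the monotone database handle), so none of these names are monotone API.

// Illustrative memoized-load pattern; not monotone code.
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>

// Stand-in for an object that is expensive to load (e.g. a roster).
struct toy_roster { std::string loaded_from; };

typedef std::map<std::string, std::shared_ptr<toy_roster> > roster_cache;

// Return the cached object if present, otherwise "load" it and remember it,
// mirroring load_and_cache_roster's flow in the patch.
static std::shared_ptr<toy_roster>
load_and_cache(std::string const & rev, roster_cache & cache)
{
  roster_cache::const_iterator i = cache.find(rev);
  if (i != cache.end())
    return i->second;
  std::shared_ptr<toy_roster> r(new toy_roster);
  r->loaded_from = rev;                  // pretend this hit the database
  cache.insert(std::make_pair(rev, r));
  return r;
}

int main()
{
  roster_cache cache;
  std::shared_ptr<toy_roster> a = load_and_cache("rev1", cache);
  std::shared_ptr<toy_roster> b = load_and_cache("rev1", cache);  // cache hit
  std::cout << (a == b ? "shared" : "reloaded") << "\n";          // prints "shared"
  return 0;
}

Handing out shared_ptr copies from the cache keeps every caller looking at the same loaded object, which is what lets each content conflict in the merge loop consult an ancestor roster without triggering a fresh database read.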