#
#
# patch "ChangeLog"
#  from [799d62f415fe212cd15ceaa5b33fdc95200df61a]
#    to [282997e8f7268edecac8230d6829e80eba8b7059]
#
# patch "automate.cc"
#  from [5ca3b0044cc7259d0e6200a69041afc827bd16cb]
#    to [e4852dbc0ac1b93e0e6acbc6ebf41d0779eb2542]
#
# patch "commands.cc"
#  from [d8547de4a2d8a7d1b0f987bb0e9456f89b78b987]
#    to [9a43a818853ac6ee44be8b0c572610f6b98f0af4]
#
# patch "restrictions.cc"
#  from [c98b699057f1dd5f225d5c959157cbfb139f5508]
#    to [e1078207872e243d0321530cdaf2a9b7b5401f3b]
#
# patch "restrictions.hh"
#  from [8938b238d676cf5d4505f7aa7d550a8b8c93238c]
#    to [4a05621518f8997969a33fa16195aef54f310a05]
#
# patch "roster.hh"
#  from [9b79ea5b9c4aafdfd5aa53eba86594f4d18c2b88]
#    to [f4ee6372abde50fbbbc2c2b9446f89c57e186184]
#
============================================================
--- ChangeLog 799d62f415fe212cd15ceaa5b33fdc95200df61a
+++ ChangeLog 282997e8f7268edecac8230d6829e80eba8b7059
@@ -1,3 +1,10 @@
+2006-02-12  Timothy Brownawell
+
+	Allow a workspace to have multiple base revisions.
+	'automate inventory', restricted commit, update, and revert
+	only work on a workspace with a single base revision
+	compiles, but completely untested
+
 2005-01-25  Matt Johnston
 
 	* HACKING: add some notes about compiling - precompiled headers,
============================================================
--- automate.cc 5ca3b0044cc7259d0e6200a69041afc827bd16cb
+++ automate.cc e4852dbc0ac1b93e0e6acbc6ebf41d0779eb2542
@@ -685,8 +685,10 @@
   inventory_map inventory;
   cset cs;
   path_set unchanged, changed, missing, known, unknown, ignored;
 
-  E(false, F("FIXME_WORKMERGE"));
-//  get_base_and_current_roster_shape(base, curr, nis, app);
+  parentage parents;
+  get_parentage_and_current_roster_shape(parents, curr, nis, app);
+  N(parents.size() == 1, F("automate inventory does not work on a workspace with multiple parents"));
+  base = parents.begin()->second;
 
   make_cset(base, curr, cs);
   I(cs.deltas_applied.empty());
@@ -942,11 +944,11 @@
 {
   revision_set rev;
   roster_t new_roster;
-  std::vector<roster_t> old_rosters;
+  parentage parents;
 
   app.require_working_copy();
   get_unrestricted_working_revision_and_rosters(app, rev,
-                                                old_rosters,
+                                                parents,
                                                 new_roster);
   calculate_ident(rev, ident);
   write_revision_set(rev, dat);
@@ -988,10 +990,10 @@
 
   if (args.size() == 0)
     {
-      std::vector<roster_t> old_rosters;
+      parentage parents;
       revision_set rs;
       app.require_working_copy();
-      get_unrestricted_working_revision_and_rosters(app, rs, old_rosters, new_roster);
+      get_unrestricted_working_revision_and_rosters(app, rs, parents, new_roster);
     }
   else
     {
============================================================
--- commands.cc d8547de4a2d8a7d1b0f987bb0e9456f89b78b987
+++ commands.cc 9a43a818853ac6ee44be8b0c572610f6b98f0af4
@@ -340,34 +340,40 @@
     return;
   inodeprint_map ipm_new;
   revision_set rev;
-  roster_t old_roster, new_roster;
+  roster_t new_roster;
+  parentage parents;
   get_unrestricted_working_revision_and_rosters(app, rev,
-                                                old_roster,
+                                                parents,
                                                 new_roster);
-  
+
   node_map const & new_nodes = new_roster.all_nodes();
   for (node_map::const_iterator i = new_nodes.begin(); i != new_nodes.end(); ++i)
     {
       node_id nid = i->first;
-      if (old_roster.has_node(nid))
+      for (parentage::iterator j = parents.begin(); j != parents.end(); ++j)
         {
-          node_t old_node = old_roster.get_node(nid);
-          if (is_file_t(old_node))
+          roster_t & old_roster(j->second);
+          if (old_roster.has_node(nid))
             {
-              node_t new_node = i->second;
-              I(is_file_t(new_node));
+              node_t old_node = old_roster.get_node(nid);
+              if (is_file_t(old_node))
+                {
+                  node_t new_node = i->second;
+                  I(is_file_t(new_node));
-              file_t old_file = downcast_to_file_t(old_node);
-              file_t new_file = downcast_to_file_t(new_node);
+                  file_t old_file = downcast_to_file_t(old_node);
+                  file_t new_file = downcast_to_file_t(new_node);
 
-              if (new_file->content == old_file->content)
-                {
-                  split_path sp;
-                  new_roster.get_name(nid, sp);
-                  file_path fp(sp);
-                  hexenc<inodeprint> ip;
-                  if (inodeprint_file(fp, ip))
-                    ipm_new.insert(inodeprint_entry(fp, ip));
+                  if (new_file->content == old_file->content)
+                    {
+                      split_path sp;
+                      new_roster.get_name(nid, sp);
+                      file_path fp(sp);
+                      hexenc<inodeprint> ip;
+                      if (inodeprint_file(fp, ip))
+                        ipm_new.insert(inodeprint_entry(fp, ip));
+                      break;
+                    }
                 }
             }
         }
@@ -1295,11 +1301,12 @@
               OPT_DEPTH % OPT_EXCLUDE % OPT_BRIEF)
 {
   revision_set rs;
-  roster_t old_roster, new_roster;
+  roster_t new_roster;
+  parentage parents;
   data tmp;
 
   app.require_working_copy();
-  get_working_revision_and_rosters(app, args, rs, old_roster, new_roster);
+  get_working_revision_and_rosters(app, args, rs, parents, new_roster);
 
   if (global_sanity.brief)
     {
@@ -1655,13 +1662,14 @@
 ls_known (app_state & app, vector<utf8> const & args)
 {
   revision_set rs;
-  roster_t old_roster, new_roster;
+  roster_t new_roster;
+  parentage parents;
   data tmp;
 
   app.require_working_copy();
 
   path_set paths;
-  get_working_revision_and_rosters(app, args, rs, old_roster, new_roster);
+  get_working_revision_and_rosters(app, args, rs, parents, new_roster);
   new_roster.extract_path_set(paths);
 
   for (path_set::const_iterator p = paths.begin(); p != paths.end(); ++p)
@@ -1677,10 +1685,11 @@
                              path_set & unknown, path_set & ignored)
 {
   revision_set rev;
-  roster_t old_roster, new_roster;
+  roster_t new_roster;
   path_set known;
+  parentage parents;
 
-  get_working_revision_and_rosters(app, args, rev, old_roster, new_roster);
+  get_working_revision_and_rosters(app, args, rev, parents, new_roster);
   new_roster.extract_path_set(known);
 
   file_itemizer u(app, known, unknown, ignored);
@@ -1708,15 +1717,11 @@
 static void
 find_missing (app_state & app, vector<utf8> const & args, path_set & missing)
 {
-  revision_id base_rid;
-  roster_t base_roster;
-  cset included_work, excluded_work;
-  path_set old_paths, new_paths;
+  path_set new_paths;
+  std::vector edges;
 
   app.require_working_copy();
-  get_base_roster_and_working_cset(app, args, base_rid, base_roster,
-                                   old_paths, new_paths,
-                                   included_work, excluded_work);
+  get_base_roster_and_working_cset(app, args, edges, new_paths);
 
   for (path_set::const_iterator i = new_paths.begin(); i != new_paths.end(); ++i)
     {
@@ -2186,10 +2191,11 @@
     throw usage(name);
 
   revision_set rs;
-  roster_t old_roster, new_roster;
+  roster_t new_roster;
+  parentage parents;
 
   app.require_working_copy();
-  get_unrestricted_working_revision_and_rosters(app, rs, old_roster, new_roster);
+  get_unrestricted_working_revision_and_rosters(app, rs, parents, new_roster);
 
   file_path path = file_path_external(idx(args,1));
   split_path sp;
@@ -2232,9 +2238,9 @@
           throw usage(name);
         }
 
-      cset new_work;
-      make_cset(old_roster, new_roster, new_work);
-      put_work_cset(new_work);
+      revision_set new_work;
+      make_revision_set(parents, new_roster, new_work);
+      put_work_revision_set(new_work);
       update_any_attrs(app);
     }
   else if (subcmd == "get")
@@ -2306,20 +2312,36 @@
   bool log_message_given;
   revision_set rs;
   revision_id rid;
-  roster_t old_roster, new_roster;
+  roster_t new_roster;
+  parentage_with_changes parents_and_excluded;
 
   app.make_branch_sticky();
   app.require_working_copy();
 
   // preserve excluded work for future commmits
-  cset excluded_work;
-  get_working_revision_and_rosters(app, args, rs, old_roster, new_roster, excluded_work);
+  get_working_revision_and_rosters(app, args, rs,
+                                   parents_and_excluded,
+                                   new_roster);
   calculate_ident(rs, rid);
 
+  revision_set excluded;
+  {
+    boost::shared_ptr<cset> cs(new cset());
+    bool restriction_empty = true;
+    for (parentage_with_changes::iterator i
+           = parents_and_excluded.begin();
+         i != parents_and_excluded.end(); ++i)
+      if (!i->second.second.empty())
+        restriction_empty = false;
+    N(restriction_empty || parents_and_excluded.size() == 1,
+      F("Cannot use a restricted commit when the workspace has multiple base revisions."));
+    *cs = parents_and_excluded.begin()->second.second;
+    excluded.edges.insert(make_pair(rid, cs));
+  }
+
   N(rs.is_nontrivial(), F("no changes to commit\n"));
 
   cert_value branchname;
-  I(rs.edges.size() == 1);
 
   set<revision_id> heads;
   get_branch_heads(app.branch_name(), app, heads);
@@ -2375,76 +2397,78 @@
       // new revision
       L(FL("inserting new revision %s\n") % rid);
 
-      I(rs.edges.size() == 1);
-      edge_map::const_iterator edge = rs.edges.begin();
-      I(edge != rs.edges.end());
+      I(rs.edges.size() >= 1);
+      for (edge_map::const_iterator edge = rs.edges.begin();
+           edge != rs.edges.end(); ++edge)
+        {
 
-      // process file deltas or new files
-      cset const & cs = edge_changes(edge);
+          // process file deltas or new files
+          cset const & cs = edge_changes(edge);
 
-      for (std::map<split_path, std::pair<file_id, file_id> >::const_iterator i = cs.deltas_applied.begin();
-           i != cs.deltas_applied.end(); ++i)
-        {
-          file_path path(i->first);
-          file_id old_content = i->second.first;
-          file_id new_content = i->second.second;
+          for (std::map<split_path, std::pair<file_id, file_id> >::const_iterator i = cs.deltas_applied.begin();
+               i != cs.deltas_applied.end(); ++i)
+            {
+              file_path path(i->first);
+              file_id old_content = i->second.first;
+              file_id new_content = i->second.second;
 
-          if (app.db.file_version_exists(new_content))
-            {
-              L(FL("skipping file delta %s, already in database\n")
-                % delta_entry_dst(i));
-            }
-          else if (app.db.file_version_exists(old_content))
-            {
-              L(FL("inserting delta %s -> %s\n")
-                % old_content % new_content);
-              file_data old_data;
-              data new_data;
-              app.db.get_file_version(old_content, old_data);
-              read_localized_data(path, new_data, app.lua);
-              // sanity check
-              hexenc<id> tid;
-              calculate_ident(new_data, tid);
-              N(tid == new_content.inner(),
-                F("file '%s' modified during commit, aborting")
-                % path);
-              delta del;
-              diff(old_data.inner(), new_data, del);
-              dbw.consume_file_delta(old_content,
-                                     new_content,
-                                     file_delta(del));
-            }
-          else
-            {
-              L(FL("inserting full version %s\n") % new_content);
-              data new_data;
-              read_localized_data(path, new_data, app.lua);
-              // sanity check
-              hexenc<id> tid;
-              calculate_ident(new_data, tid);
-              N(tid == new_content.inner(),
-                F("file '%s' modified during commit, aborting")
-                % path);
-              dbw.consume_file_data(new_content, file_data(new_data));
-            }
-        }
+              if (app.db.file_version_exists(new_content))
+                {
+                  L(FL("skipping file delta %s, already in database\n")
+                    % delta_entry_dst(i));
+                }
+              else if (app.db.file_version_exists(old_content))
+                {
+                  L(FL("inserting delta %s -> %s\n")
+                    % old_content % new_content);
+                  file_data old_data;
+                  data new_data;
+                  app.db.get_file_version(old_content, old_data);
+                  read_localized_data(path, new_data, app.lua);
+                  // sanity check
+                  hexenc<id> tid;
+                  calculate_ident(new_data, tid);
+                  N(tid == new_content.inner(),
+                    F("file '%s' modified during commit, aborting")
+                    % path);
+                  delta del;
+                  diff(old_data.inner(), new_data, del);
+                  dbw.consume_file_delta(old_content,
+                                         new_content,
+                                         file_delta(del));
+                }
+              else
+                {
+                  L(FL("inserting full version %s\n") % new_content);
+                  data new_data;
+                  read_localized_data(path, new_data, app.lua);
+                  // sanity check
+                  hexenc<id> tid;
+                  calculate_ident(new_data, tid);
+                  N(tid == new_content.inner(),
+                    F("file '%s' modified during commit, aborting")
+                    % path);
+                  dbw.consume_file_data(new_content, file_data(new_data));
+                }
+            }
 
-      for (std::map<split_path, file_id>::const_iterator i = cs.files_added.begin();
-           i != cs.files_added.end(); ++i)
-        {
-          file_path path(i->first);
-          file_id new_content = i->second;
+          for (std::map<split_path, file_id>::const_iterator i = cs.files_added.begin();
+               i != cs.files_added.end(); ++i)
+            {
+              file_path path(i->first);
+              file_id new_content = i->second;
 
-          L(FL("inserting full version %s\n") % new_content);
-          data new_data;
-          read_localized_data(path, new_data, app.lua);
-          // sanity check
-          hexenc<id> tid;
-          calculate_ident(new_data, tid);
-          N(tid == new_content.inner(),
-            F("file '%s' modified during commit, aborting")
-            % path);
-          dbw.consume_file_data(new_content, file_data(new_data));
+              L(FL("inserting full version %s\n") % new_content);
+              data new_data;
+              read_localized_data(path, new_data, app.lua);
+              // sanity check
+              hexenc<id> tid;
+              calculate_ident(new_data, tid);
+              N(tid == new_content.inner(),
+                F("file '%s' modified during commit, aborting")
+                % path);
+              dbw.consume_file_data(new_content, file_data(new_data));
+            }
         }
     }
@@ -2466,8 +2490,7 @@
     }
 
   // small race condition here...
-  put_work_cset(excluded_work);
-  put_revision_id(rid);
+  put_work_revision_set(excluded);
 
   P(F("committed revision %s\n") % rid);
   blank_user_log();
@@ -2688,7 +2711,6 @@
           "try adding --external or removing --diff-args?"));
 
   cset composite;
-  cset excluded;
 
   // initialize before transaction so we have a database to work with
 
@@ -2699,17 +2721,17 @@
 
   if (app.revision_selectors.size() == 0)
     {
+      parentage parents;
       get_working_revision_and_rosters(app, args, r_new,
-                                       old_roster,
-                                       new_roster,
-                                       excluded);
+                                       parents,
+                                       new_roster);
 
-      I(r_new.edges.size() == 1 || r_new.edges.size() == 0);
+      N(r_new.edges.size() == 1 || r_new.edges.size() == 0,
+        F("Your workspace has multiple base revisions, so you must specify which one to diff against."));
       if (r_new.edges.size() == 1)
         composite = edge_changes(r_new.edges.begin());
       new_is_archived = false;
-      revision_id old_rid;
-      get_revision_id(old_rid);
+      revision_id old_rid = edge_old_revision(r_new.edges.begin());
       header << "# old_revision [" << old_rid << "]" << endl;
     }
   else if (app.revision_selectors.size() == 1)
@@ -2718,14 +2740,13 @@
       revision_id r_old_id;
       complete(app, idx(app.revision_selectors, 0)(), r_old_id);
       N(app.db.revision_exists(r_old_id), F("no such revision '%s'") % r_old_id);
+      parentage parents;
       get_working_revision_and_rosters(app, args, r_new,
-                                       old_roster,
-                                       new_roster,
-                                       excluded);
+                                       parents,
+                                       new_roster);
       // Clobber old_roster with the one specified
       app.db.get_revision(r_old_id, r_old);
       app.db.get_roster(r_old_id, old_roster);
-      I(r_new.edges.size() == 1 || r_new.edges.size() == 0);
       N(r_new.edges.size() == 1, F("current revision has no ancestor"));
       new_is_archived = false;
       header << "# old_revision [" << r_old_id << "]" << endl;
@@ -2846,10 +2867,14 @@
   // load the base roster twice.  The API could use some factoring or
   // such.  But it should work for now; revisit if performance is
   // intolerable.
-
-  get_unrestricted_working_revision_and_rosters(app, r_working,
-                                                *old_roster,
-                                                working_roster);
+  {
+    parentage parents;
+    get_unrestricted_working_revision_and_rosters(app, r_working,
+                                                  parents,
+                                                  working_roster);
+    N(parents.size() == 1, F("Update does not work on workspaces with multiple base revisions."));
+    *old_roster = parents.begin()->second;
+  }
   calculate_ident(r_working, r_working_id);
   I(r_working.edges.size() == 1);
   r_old_id = edge_old_revision(r_working.edges.begin());
@@ -3006,14 +3031,17 @@
   // small race condition here...
   // nb: we write out r_chosen, not r_new, because the revision-on-disk
   // is the basis of the working copy, not the working copy itself.
-  put_revision_id(r_chosen_id);
+  {
+    revision_set rs;
+    boost::shared_ptr<cset> cs(new cset(remaining));
+    rs.edges.insert(make_pair(r_chosen_id, cs));
+  }
 
   if (!app.branch_name().empty())
     {
      app.make_branch_sticky();
    }
   P(F("updated to base revision %s\n") % r_chosen_id);
-  put_work_cset(remaining);
   update_any_attrs(app);
   maybe_update_inodeprints(app);
 }
@@ -3300,15 +3328,20 @@
   revision_id old_revision_id;
   cset work, included_work, excluded_work;
   path_set old_paths;
+  revision_set rs;
+  parentage parents;
 
   if (args.size() < 1 && !app.missing)
     throw usage(name);
 
   app.require_working_copy();
 
-  get_base_revision(app, old_revision_id, old_roster);
+  get_workspace_parentage_and_revision(app, parents, rs);
+  N(parents.size() == 1, F("Cannot revert a workspace with multiple base revisions."));
+  old_revision_id = parents.begin()->first;
+  old_roster = parents.begin()->second;
+  work = edge_changes(rs.edges.begin());
 
-  get_work_cset(work);
   old_roster.extract_path_set(old_paths);
 
   path_set valid_paths(old_paths);
@@ -3390,7 +3423,8 @@
     }
 
   // race
-  put_work_cset(excluded_work);
+  *rs.edges.begin()->second = excluded_work;
+  put_work_revision_set(rs);
   update_any_attrs(app);
   maybe_update_inodeprints(app);
 }
@@ -3492,7 +3526,12 @@
   file.split(sp);
 
   if (app.revision_selectors.size() == 0)
-    get_revision_id(rid);
+    {
+      revision_set rs;
+      get_work_revision_set(rs);
+      N(rs.edges.size() == 1, F("Your working copy has multiple base revisions, you must specify one with --revision"));
+      rid = edge_old_revision(rs.edges.begin());
+    }
   else
     complete(app, idx(app.revision_selectors, 0)(), rid);
 
@@ -3526,11 +3565,14 @@
 
   set<revision_id> frontier;
 
-  revision_id first_rid;
+  revision_id first_rid; // only used if the user gave --revision
   if (app.revision_selectors.size() == 0)
     {
-      get_revision_id(first_rid);
-      frontier.insert(first_rid);
+      revision_set rs;
+      get_work_revision_set(rs);
+      for (edge_map::const_iterator i = rs.edges.begin();
+           i != rs.edges.end(); ++i)
+        frontier.insert(edge_old_revision(i));
     }
   else
     {
@@ -3548,11 +3590,12 @@
 
   if (args.size() > 0)
     {
      // User wants to trace only specific files
-      roster_t old_roster, new_roster;
+      roster_t new_roster;
+      parentage parents;
      revision_set rev;
 
      if (app.revision_selectors.size() == 0)
-        get_unrestricted_working_revision_and_rosters(app, rev, old_roster, new_roster);
+        get_unrestricted_working_revision_and_rosters(app, rev, parents, new_roster);
      else
        app.db.get_roster(first_rid, new_roster);
 
@@ -3741,8 +3784,8 @@
     dir = ".";
 
   app.create_working_copy(dir);
-  revision_id null;
-  put_revision_id(null);
+  revision_set null;
+  put_work_revision_set(null);
 }
 
 CMD(automate, N_("automation"),
============================================================
--- restrictions.cc c98b699057f1dd5f225d5c959157cbfb139f5508
+++ restrictions.cc e1078207872e243d0321530cdaf2a9b7b5401f3b
@@ -234,14 +234,14 @@
 get_working_revision_and_rosters(app_state & app,
                                  std::vector<utf8> const & args,
                                  revision_set & rev,
-                                 std::vector<std::pair<roster_t, cset> > & old_rosters_and_excluded,
+                                 parentage_with_changes & parents_with_excluded,
                                  roster_t & new_roster)
 {
   path_set new_paths;
   manifest_id new_manifest_id;
 
   rev.edges.clear();
-  old_rosters_and_excluded.clear();
+  parents_with_excluded.clear();
 
   std::vector edges;
   get_base_roster_and_working_cset(app, args, edges, new_paths);
@@ -295,6 +295,7 @@
         }
 
       safe_insert(rev.edges, std::make_pair(old_revision_id, cs));
+      safe_insert(parents_with_excluded, std::make_pair(old_revision_id, std::make_pair(new_roster, *cs)));
     }
 }
 
@@ -302,28 +303,28 @@
 get_working_revision_and_rosters(app_state & app,
                                  std::vector<utf8> const & args,
                                  revision_set & rev,
-                                 std::vector<roster_t> & old_rosters,
+                                 parentage & parents,
                                  roster_t & new_roster)
 {
-  old_rosters.clear();
-  std::vector<std::pair<roster_t, cset> > old_and_excluded;
+  parents.clear();
+  parentage_with_changes old_and_excluded;
   get_working_revision_and_rosters(app, args, rev, old_and_excluded, new_roster);
-  for (std::vector<std::pair<roster_t, cset> >::iterator i
+  for (parentage_with_changes::iterator i
          = old_and_excluded.begin();
       i != old_and_excluded.end(); ++i)
-    old_rosters.push_back(i->first);
+    safe_insert(parents, std::make_pair(i->first, i->second.first));
 }
 
 void
 get_unrestricted_working_revision_and_rosters(app_state & app,
                                               revision_set & rev,
-                                              std::vector<roster_t> & old_rosters,
+                                              parentage & parents,
                                               roster_t & new_roster)
 {
   std::vector<utf8> empty_args;
   std::set<utf8> saved_exclude_patterns(app.exclude_patterns);
   app.exclude_patterns.clear();
-  get_working_revision_and_rosters(app, empty_args, rev, old_rosters, new_roster);
+  get_working_revision_and_rosters(app, empty_args, rev, parents, new_roster);
   app.exclude_patterns = saved_exclude_patterns;
 }
============================================================
--- restrictions.hh 8938b238d676cf5d4505f7aa7d550a8b8c93238c
+++ restrictions.hh 4a05621518f8997969a33fa16195aef54f310a05
@@ -73,7 +73,7 @@
 get_working_revision_and_rosters(app_state & app,
                                  std::vector<utf8> const & args,
                                  revision_set & rev,
-                                 std::vector<std::pair<roster_t, cset> > & old_rosters_and_excluded,
+                                 parentage_with_changes & parents_with_excluded,
                                  roster_t & new_roster);
 
 // Same as above, only without the "excluded" out-parameter.
@@ -81,13 +81,13 @@
 get_working_revision_and_rosters(app_state & app,
                                  std::vector<utf8> const & args,
                                  revision_set & rev,
-                                 std::vector<roster_t> & old_rosters,
+                                 parentage & parents,
                                  roster_t & new_roster);
 
 void
 get_unrestricted_working_revision_and_rosters(app_state & app,
                                               revision_set & rev,
-                                              std::vector<roster_t> & old_rosters,
+                                              parentage & parents,
                                               roster_t & new_roster);
 
 void
============================================================
--- roster.hh 9b79ea5b9c4aafdfd5aa53eba86594f4d18c2b88
+++ roster.hh f4ee6372abde50fbbbc2c2b9446f89c57e186184
@@ -291,6 +291,7 @@
                cset & cs);
 
 typedef std::map<revision_id, roster_t> parentage;
+typedef std::map<revision_id, std::pair<roster_t, cset> > parentage_with_changes;
 
 void
 make_revision_set(parentage const & parents,
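
The short program below is not part of the patch; it is a minimal sketch of the consumption pattern the new parentage typedef implies. Callers that can still only handle one base revision, such as update, revert, and 'automate inventory' above, check parents.size() == 1 and then use the roster of the single entry. The revision_id and roster_t definitions here are simplified stand-ins, not monotone's real classes, and single_base() is a hypothetical helper used only for illustration.

// Illustrative sketch only; simplified stand-ins for monotone's types.
#include <cassert>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <utility>

struct revision_id { std::string hash; };         // stand-in
struct roster_t { std::set<std::string> files; };  // stand-in

// ordering so revision_id can be a std::map key, as the real type can
bool operator<(revision_id const & a, revision_id const & b)
{ return a.hash < b.hash; }

// mirrors the new typedef: one entry per workspace base revision
typedef std::map<revision_id, roster_t> parentage;

// the single-parent guard used by update, revert, and 'automate inventory'
roster_t const & single_base(parentage const & parents)
{
  // the real code uses N(...) with a user-facing message instead of assert
  assert(parents.size() == 1);
  return parents.begin()->second;
}

int main()
{
  parentage parents;
  revision_id base; base.hash = "799d62f415fe212cd15ceaa5b33fdc95200df61a";
  roster_t r; r.files.insert("ChangeLog");
  parents.insert(std::make_pair(base, r));

  std::cout << single_base(parents).files.size()
            << " file(s) in the base roster\n";
  return 0;
}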