#
# add_file "tests/t_ambiguous_tags.at"
#
# add_file "tests/t_kill_tag_locally.at"
#
# patch "AUTHORS"
# from [37ef2b20900f21d662142e2f05c6d8b0da61e6a4]
# to [36534b3b81f00bdef9268973bd83825512b61098]
#
# patch "ChangeLog"
# from [8660553a29b07571e429faf719cf633ed8bf745b]
# to [37ece97991f08515c18cf1d12a9330b1a5acefd3]
#
# patch "commands.cc"
# from [be830b7f5140b32a670940776adf102456c95168]
# to [400f88c38c48ba139b2ee56f8d2a7791516d6ecc]
#
# patch "constants.cc"
# from [dcbffe20599c26310957ee7c1310a44ae93dc2a0]
# to [7d39c51c90d4e9be993c9cef054af6d52cb8abe6]
#
# patch "constants.hh"
# from [94b12534ecf00b0d10056ab3edf9fad2e38eb300]
# to [e44952936dcfaf9f158865b67046394e8805763a]
#
# patch "contrib/ciabot_monotone.py"
# from [8eb270dff65b9be32974d139b0e6d8ba720902d4]
# to [53817a19420055452a332b922bd85e414b2b042f]
#
# patch "database.cc"
# from [9d64df5fbd6ea4685b9a556ad44ed025fd8d8242]
# to [8ba130dca3d9ea3af77ff4ca881859111ffdabb3]
#
# patch "database.hh"
# from [de2b03ddda1c727339e5a26b9359721b7c921319]
# to [5689e4a21edef9cc051cade4cc56518d492dfffd]
#
# patch "monotone.texi"
# from [874fbe044a630abf6de449c5e39a511e9091db40]
# to [b19c0f2cedcd918ffe81dae204ec6ce7e2b924a7]
#
# patch "netsync.cc"
# from [b0e674d2488fab67c11d3b1ddff075f2a950072f]
# to [035d959611e3460450609a2e87a89bf42ddc3fe8]
#
# patch "rcs_import.cc"
# from [b90ec8325f19892896d9692fc01893c9d8816594]
# to [9a42135b663a3f926d78159eb765e19014ece69c]
#
# patch "schema_migration.cc"
# from [0fbb5b522f316fd626766146e2196e3267ce29bf]
# to [06a7e2c801bb9d6e789f3883fa781a394993250a]
#
# patch "tests/t_ambiguous_tags.at"
# from []
# to [de01b6e6641e896d423ac223cbfdebfe8ccd5dfe]
#
# patch "tests/t_cvsimport_drepper2.at"
# from [c0d315c7c6fd5077592b8d81ecbc8a801acc3e41]
# to [530e44ff587e4ad3ce3ca95026f1990f505c8e5e]
#
# patch "tests/t_kill_tag_locally.at"
# from []
# to [b091837656ce01a0b6ecf80706725d3251e00abf]
#
# patch "testsuite.at"
# from [e85a77ab1dec51c5e6e40ff821ad8742b3a1b927]
# to [35beb1e1f80eab0411f3bd2783d34a989062e82c]
#
--- AUTHORS
+++ AUTHORS
@@ -63,6 +63,7 @@
Ethan Blanton
Eric Anderson
Kaushik Veeraraghavan
+ Jordan Breeding
supporting files:
-----------------
--- ChangeLog
+++ ChangeLog
@@ -1,9 +1,62 @@
-2005-07-13 Derek Scherger
+2005-07-13 Nathaniel Smith
- * ChangeLog:
- * database.cc:
- * schema_migration.cc: merge cleanup
+ * monotone.texi (Database): Document kill_tag_locally.
+2005-07-13 Nathaniel Smith
+
+ * tests/t_kill_tag_locally.at, tests/t_ambiguous_tags.at: New
+ tests.
+ * testsuite.at: Add them.
+
+2005-07-11 graydon hoare
+
+ * AUTHORS: Add Jordan.
+ * commands.cc (ls_tags): Do not uniquify tags.
+ * constants.{cc,hh} (cvs_window): Change to time_t, tighten to 5 minutes.
+ * rcs_import.cc (window): Remove.
+ (note_type): Remove dead code.
+ (is_sbr): Add test for synthetic branch roots.
+ (cvs_commit::is_synthetic_branch_root): New test.
+ (process_branch): Skip synthetic branch roots, push new branch
+ before picking branch to mark, rather than after.
+ (cvs_history::index_branchpoint_symbols): Handle vendor branches.
+ (cvs_history::push_branch): Do not duplicate root on private branches.
+ (import_branch): Fix up cluster inference.
+ (cluster_consumer::consume_cluster): New invariant.
+ * tests/t_cvsimport_drepper2.at: Modify to reflect fixes.
+
+2005-07-11 Jordan Breeding
+
+ * commands.cc (db): New subcommand "kill_tag_locally"
+ * database.{cc,hh} (delete_tag_named): New function.
+
+2005-07-12 Nathaniel Smith
+
+ * schema_migration.cc (migrator::migrate): When there is nothing
+ to be done, do nothing.
+
+2005-07-12 Nathaniel Smith
+
+ * netsync.cc (rebuild_merkle_trees): Reduce memory usage a bit,
+ and don't insert branch certs that the other side will just end up
+ throwing away (reduces network traffic).
+
+2005-07-12 Nathaniel Smith
+
+ * testsuite.at (NETSYNC_SERVE_START, NETSYNC_SERVE_N_START):
+ Really, really really fix up quoting. Really.
+ I hope.
+
+2005-07-12 Nathaniel Smith
+
+ * contrib/ciabot_monotone.py (config.project_for_branch): Clarify
+ comment text for non-Python programmers.
+
+2005-07-12 Nathaniel Smith
+
+ * testsuite.at (NETSYNC_SERVE_START, NETSYNC_SERVE_N_START): Fixup
+ quoting.
+
2005-07-11 Nathaniel Smith
* crypto_tests.cc: New SHA1 correctness tests from Kaushik Veeraraghavan.
@@ -320,7 +373,7 @@
added --no-merges option provides a means to disable the merge
entries).
-2005-06-26 Matthew Gregan
* tests/t_automate_stdio.at, tests/t_cvsimport_drepper.at,
tests/t_selector_later_earlier.at: Further canonicalisation of
--- commands.cc
+++ commands.cc
@@ -1623,20 +1623,23 @@
vector< revision<cert> > certs;
app.db.get_revision_certs(tag_cert_name, certs);
- std::map<cert_value, revision<cert> > sorted_certs;
+ std::set< pair<cert_value, pair<hexenc<id>, rsa_keypair_id> > > sorted_vals;
- for (size_t i = 0; i < certs.size(); ++i)
+ for (vector< revision<cert> >::const_iterator i = certs.begin();
+ i != certs.end(); ++i)
{
cert_value name;
- decode_base64(idx(certs, i).inner().value, name);
- sorted_certs.insert(std::make_pair(name, idx(certs, i)));
+ cert c = i->inner();
+ decode_base64(c.value, name);
+ sorted_vals.insert(std::make_pair(name, std::make_pair(c.ident, c.key)));
}
- for (std::map<cert_value, revision<cert> >::const_iterator i = sorted_certs.begin();
- i != sorted_certs.end(); ++i)
+ for (std::set< pair<cert_value, pair<hexenc<id>, rsa_keypair_id> > >::const_iterator i = sorted_vals.begin();
+ i != sorted_vals.end(); ++i)
{
cout << i->first << " "
- << i->second.inner().ident << " "
- << i->second.inner().key << endl;
+ << i->second.first << " "
+ << i->second.second << endl;
}
}
@@ -2129,6 +2132,7 @@
"execute\n"
"kill_rev_locally ID\n"
"kill_branch_locally BRANCH\n"
+ "kill_tag_locally TAG\n"
"check\n"
"changesetify\n"
"rebuild\n"
@@ -2169,6 +2173,8 @@
app.db.clear_epoch(cert_value(idx(args, 1)()));
else if (idx(args, 0)() == "kill_branch_locally")
app.db.delete_branch_named(cert_value(idx(args, 1)()));
+ else if (idx(args, 0)() == "kill_tag_locally")
+ app.db.delete_tag_named(cert_value(idx(args, 1)()));
else
throw usage(name);
}
@@ -2989,9 +2995,10 @@
// we have the following
//
- // old --- working
- // \ \
- // chosen --- merged
+ // old --> working
+ // | |
+ // V V
+ // chosen --> merged
//
// - old is the revision specified in MT/revision
// - working is based on old and includes the working copy's changes
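#
# Note on the ls_tags change above: keying the sorted container on the
# full (value, ident, key) triple rather than on the tag name alone means
# a tag applied to several revisions now prints once per revision, instead
# of being collapsed to one arbitrary entry by the old std::map. For the
# ambiguous case exercised by tests/t_ambiguous_tags.at below, the output
# would look roughly like this (revision ids illustrative):
#
#   ambig_tag 4c2c1d846fa561301254cd2f5a0400cbed1e4f12 tester@test.net
#   ambig_tag a423db0ad651c74e41ab2529eca6f17513ccf714 tester@test.net
#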
--- constants.cc
+++ constants.cc
@@ -32,7 +32,7 @@
// number of seconds in window, in which to consider CVS commits equivalent
// if they have otherwise compatible contents (author, changelog)
- size_t const cvs_window = 3600 * 3;
+ time_t const cvs_window = 60 * 5;
// number of bytes in a password buffer. further bytes will be dropped.
size_t const maxpasswd = 0xfff;
--- constants.hh
+++ constants.hh
@@ -33,7 +33,7 @@
// number of seconds in window, in which to consider CVS commits equivalent
// if they have otherwise compatible contents (author, changelog)
- extern size_t const cvs_window;
+ extern time_t const cvs_window;
// number of bytes in a password buffer. further bytes will be dropped.
extern size_t const maxpasswd;
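#
# The cvs_window change above tightens the CVS commit-coalescing window
# from three hours to five minutes, and types it as time_t so it compares
# cleanly against commit timestamps. A minimal sketch of the test this
# constant feeds, mirroring the cluster-expiry check in rcs_import.cc
# (the free function here is illustrative, not part of the patch):
#
#   #include <ctime>
#   namespace constants { extern time_t const cvs_window; }
#
#   // a cluster whose first commit is more than cvs_window seconds older
#   // than the commit under examination can never absorb that commit, so
#   // it is expired and emitted.
#   static bool cluster_expired(time_t cluster_first_time, time_t commit_time)
#   {
#     return cluster_first_time + constants::cvs_window < commit_time;
#   }
#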
--- contrib/ciabot_monotone.py
+++ contrib/ciabot_monotone.py
@@ -27,8 +27,10 @@
class config:
def project_for_branch(self, branchname):
- # Customize this if you are watching multiple different projects.
- # Return None if changes to the given branch are uninteresting.
+ # Customize this to return your project name(s). If changes to the
+ # given branch are uninteresting -- i.e., changes to them should be
+ # ignored entirely -- then return the python constant None (which is
+ # distinct from the string "None", a valid but poor project name!).
#if branchname.startswith("net.venge.monotone-viz"):
# return "monotone-viz"
#elif branchname.startswith("net.venge.monotone.contrib.monotree"):
--- database.cc
+++ database.cc
@@ -1564,6 +1564,17 @@
encoded().c_str());
}
+/// Deletes all certs referring to a particular tag.
+void
+database::delete_tag_named(cert_value const & tag)
+{
+ base64<cert_value> encoded;
+ encode_base64(tag, encoded);
+ L(F("Deleting all references to tag %s\n") % tag);
+ execute("DELETE FROM revision_certs WHERE name='tag' AND value ='%s'",
+ encoded().c_str());
+}
+
// crypto key management
void
--- database.hh
+++ database.hh
@@ -310,6 +310,8 @@
void delete_branch_named(cert_value const & branch);
+ void delete_tag_named(cert_value const & tag);
+
// crypto key / cert operations
void get_key_ids(std::string const & pattern,
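#
# Worked example for delete_tag_named above: cert values are stored
# base64-encoded, so killing a tag named "1.0" (base64 "MS4w") executes
#
#   DELETE FROM revision_certs WHERE name='tag' AND value ='MS4w'
#
# which removes only the tag certs carrying that exact value; revisions
# and their other certs are untouched.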
--- monotone.texi
+++ monotone.texi
@@ -4425,6 +4425,19 @@
you sync, unless the owners of those databases also delete those
certificates locally.
+@item monotone db kill_tag_locally @var{tag}
+
+This command ``kills'' a tag by deleting all tag certs with that tag
+name. You should consider carefully whether you want to use it, because
+it can irrevocably delete important information. It does not modify or
+delete any revisions, or any of the other certificates on tagged
+revisions; it simply removes all tag certificates with the given name.
+As with @command{db kill_rev_locally}, it only deletes the information
+from your local database; if there are other databases that you sync
+with which have this tag, the tag certificates will reappear when you
+sync, unless the owners of those databases also delete those
+certificates locally.
+
@item monotone db execute @var{sql-statement}
This is a debugging command which executes @var{sql-statement} against
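#
# Usage sketch for the documentation added above (tag name illustrative):
#
#   monotone db kill_tag_locally 1.0-rc1
#
# deletes every cert named "tag" with value "1.0-rc1" from the local
# database only; syncing with a database that still holds the tag will
# bring the certs back, as the text above warns.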
--- netsync.cc
+++ netsync.cc
@@ -3634,7 +3634,8 @@
set<revision_id> revision_ids;
set<rsa_keypair_id> inserted_keys;
-
+
+ set< hexenc<id> > bad_branch_certs;
{
// get all matching branch names
vector< revision<cert> > certs;
@@ -3648,77 +3649,80 @@
insert_with_parents(revision_id(idx(certs, i).inner().ident),
revision_ids, app);
}
+ else
+ bad_branch_certs.insert(idx(certs, i).inner().ident);
}
+ }
- // FIXME: we should probably include epochs for all branches mentioned in
- // any included branch cert, rather than just for branches included by the
- // branch mask
+ {
+ map<cert_value, epoch_data> epochs;
+ app.db.get_epochs(epochs);
+
+ epoch_data epoch_zero(std::string(constants::epochlen, '0'));
+ for (std::set<utf8>::const_iterator i = branchnames.begin();
+ i != branchnames.end(); ++i)
+ {
+ cert_value branch((*i)());
+ std::map<cert_value, epoch_data>::const_iterator j;
+ j = epochs.find(branch);
+ // set to zero any epoch which is not yet set
+ if (j == epochs.end())
+ {
+ L(F("setting epoch on %s to zero\n") % branch);
+ epochs.insert(std::make_pair(branch, epoch_zero));
+ app.db.set_epoch(branch, epoch_zero);
+ }
+ // then insert all epochs into merkle tree
+ j = epochs.find(branch);
+ I(j != epochs.end());
+ epoch_id eid;
+ epoch_hash_code(j->first, j->second, eid);
+ id raw_hash;
+ decode_hexenc(eid.inner(), raw_hash);
+ insert_into_merkle_tree(*etab, epoch_item, true, raw_hash(), 0);
+ }
+ }
+
+ typedef std::vector< std::pair<hexenc<id>,
+ std::pair<revision_id, rsa_keypair_id> > > cert_idx;
+
+ cert_idx idx;
+ app.db.get_revision_cert_index(idx);
+
+ // insert all certs and keys reachable via these revisions,
+ // except for branch certs that don't match the masks (since the other
+ // side will just discard them anyway)
+ for (cert_idx::const_iterator i = idx.begin(); i != idx.end(); ++i)
{
- map<cert_value, epoch_data> epochs;
- app.db.get_epochs(epochs);
-
- epoch_data epoch_zero(std::string(constants::epochlen, '0'));
- for (std::set<utf8>::const_iterator i = branchnames.begin();
- i != branchnames.end(); ++i)
+ hexenc<id> const & hash = i->first;
+ revision_id const & ident = i->second.first;
+ rsa_keypair_id const & key = i->second.second;
+
+ if (revision_ids.find(ident) == revision_ids.end())
+ continue;
+ if (bad_branch_certs.find(hash) != bad_branch_certs.end())
+ continue;
+
+ id raw_hash;
+ decode_hexenc(hash, raw_hash);
+ insert_into_merkle_tree(*ctab, cert_item, true, raw_hash(), 0);
+ ++certs_ticker;
+ if (inserted_keys.find(key) == inserted_keys.end())
{
- cert_value branch((*i)());
- std::map<cert_value, epoch_data>::const_iterator j;
- j = epochs.find(branch);
- // set to zero any epoch which is not yet set
- if (j == epochs.end())
+ if (app.db.public_key_exists(key))
{
- L(F("setting epoch on %s to zero\n") % branch);
- epochs.insert(std::make_pair(branch, epoch_zero));
- app.db.set_epoch(branch, epoch_zero);
+ base64< rsa_pub_key > pub_encoded;
+ app.db.get_key(key, pub_encoded);
+ hexenc<id> keyhash;
+ key_hash_code(key, pub_encoded, keyhash);
+ decode_hexenc(keyhash, raw_hash);
+ insert_into_merkle_tree(*ktab, key_item, true, raw_hash(), 0);
+ ++keys_ticker;
}
- // then insert all epochs into merkle tree
- j = epochs.find(branch);
- I(j != epochs.end());
- epoch_id eid;
- epoch_hash_code(j->first, j->second, eid);
- id raw_hash;
- decode_hexenc(eid.inner(), raw_hash);
- insert_into_merkle_tree(*etab, epoch_item, true, raw_hash(), 0);
+ inserted_keys.insert(key);
}
}
- typedef std::vector< std::pair<hexenc<id>,
- std::pair<revision_id, rsa_keypair_id> > > cert_idx;
-
- cert_idx idx;
- app.db.get_revision_cert_index(idx);
-
- // insert all certs and keys reachable via these revisions
- for (cert_idx::const_iterator i = idx.begin(); i != idx.end(); ++i)
- {
- hexenc<id> const & hash = i->first;
- revision_id const & ident = i->second.first;
- rsa_keypair_id const & key = i->second.second;
-
- if (revision_ids.find(ident) == revision_ids.end())
- continue;
-
- id raw_hash;
- decode_hexenc(hash, raw_hash);
- insert_into_merkle_tree(*ctab, cert_item, true, raw_hash(), 0);
- ++certs_ticker;
- if (inserted_keys.find(key) == inserted_keys.end())
- {
- if (app.db.public_key_exists(key))
- {
- base64< rsa_pub_key > pub_encoded;
- app.db.get_key(key, pub_encoded);
- hexenc<id> keyhash;
- key_hash_code(key, pub_encoded, keyhash);
- decode_hexenc(keyhash, raw_hash);
- insert_into_merkle_tree(*ktab, key_item, true, raw_hash(), 0);
- ++keys_ticker;
- }
- inserted_keys.insert(key);
- }
- }
- }
-
recalculate_merkle_codes(*etab, get_root_prefix().val, 0);
recalculate_merkle_codes(*ktab, get_root_prefix().val, 0);
recalculate_merkle_codes(*ctab, get_root_prefix().val, 0);
--- rcs_import.cc
+++ rcs_import.cc
@@ -42,8 +42,6 @@
#include "transforms.hh"
#include "ui.hh"
-int window = 3600 * 3;
-
using namespace std;
using boost::shared_ptr;
using boost::scoped_ptr;
@@ -67,6 +65,7 @@
file_id const & ident,
cvs_history & cvs);
+ bool is_synthetic_branch_root;
time_t time;
bool alive;
cvs_author author;
@@ -92,11 +91,24 @@
map<cvs_path, cvs_version> live_at_beginning;
vector<cvs_commit> lineage;
+ cvs_branch()
+ : last_branchpoint(0),
+ first_commit(0)
+ {
+ }
+
void note_commit(time_t now)
{
+ if (!has_a_commit)
+ {
+ first_commit = now;
+ }
+ else
+ {
+ if (now < first_commit)
+ first_commit = now;
+ }
has_a_commit = true;
- if (now < first_commit)
- first_commit = now;
}
void note_branchpoint(time_t now)
@@ -108,15 +120,22 @@
time_t beginning() const
{
- I(has_a_branchpoint || has_a_commit);
+ I(has_a_branchpoint || has_a_commit);
if (has_a_commit)
- return first_commit;
+ {
+ I(first_commit != 0);
+ return first_commit;
+ }
else
- return last_branchpoint;
+ {
+ I(last_branchpoint != 0);
+ return last_branchpoint;
+ }
}
void append_commit(cvs_commit const & c)
{
+ I(c.time != 0);
note_commit(c.time);
lineage.push_back(c);
}
@@ -173,15 +192,39 @@
void index_branchpoint_symbols(rcs_file const & r);
-
- enum note_type { note_branchpoint,
- note_branch_first_commit };
-
void push_branch(string const & branch_name, bool private_branch);
void pop_branch();
};
+static bool
+is_sbr(shared_ptr<rcs_delta> dl,
+ shared_ptr<rcs_deltatext> dt)
+{
+
+ // CVS abuses the RCS format a bit (ha!) when storing a file which
+ // was only added on a branch: on the root of the branch there'll be
+ // a commit with dead state, empty text, and a log message
+ // containing the string "file foo was initially added on branch
+ // bar". We recognize and ignore these cases, as they do not
+ // "really" represent commits to be clustered together.
+
+ if (dl->state != "dead")
+ return false;
+
+ if (!dt->text.empty())
+ return false;
+
+ string log_bit = "was initially added on branch";
+ string::const_iterator i = search(dt->log.begin(),
+ dt->log.end(),
+ log_bit.begin(),
+ log_bit.end());
+
+ return i != dt->log.end();
+}
+
+
cvs_commit::cvs_commit(rcs_file const & r,
string const & rcs_version,
file_id const & ident,
@@ -217,8 +260,14 @@
time = mktime(&t);
L(F("= %i\n") % time);
+ is_synthetic_branch_root = is_sbr(delta->second,
+ deltatext->second);
+
alive = delta->second->state != "dead";
- changelog = cvs.changelog_interner.intern(deltatext->second->log);
+ if (is_synthetic_branch_root)
+ changelog = cvs.changelog_interner.intern("synthetic branch root changelog");
+ else
+ changelog = cvs.changelog_interner.intern(deltatext->second->log);
author = cvs.author_interner.intern(delta->second->author);
path = cvs.curr_file_interned;
version = cvs.file_version_interner.intern(ident.inner()());
@@ -543,8 +592,11 @@
L(F("version %s has %d lines\n") % curr_version % curr_lines->size());
cvs_commit curr_commit(r, curr_version, curr_id, cvs);
- cvs.stk.top()->append_commit(curr_commit);
- ++cvs.n_versions;
+ if (!curr_commit.is_synthetic_branch_root)
+ {
+ cvs.stk.top()->append_commit(curr_commit);
+ ++cvs.n_versions;
+ }
string next_version = r.deltas.find(curr_version)->second->next;
@@ -569,8 +621,8 @@
{
for (ity i = range.first; i != range.second; ++i)
{
+ cvs.push_branch(i->second, false);
shared_ptr<cvs_branch> b = cvs.stk.top();
- cvs.push_branch(i->second, false);
if (curr_commit.alive)
b->live_at_beginning[cvs.curr_file_interned] = curr_commit.version;
b->note_branchpoint(curr_commit.time);
@@ -748,25 +800,58 @@
vector<string> components;
split_version(num, components);
+ vector<string> first_entry_components;
+ vector<string> branchpoint_components;
+
if (components.size() > 2 &&
- components[components.size() - 2] == string("0"))
+ (components.size() % 2 == 1))
+ {
+ // this is a "vendor" branch
+ //
+ // such as "1.1.1", where "1.1" is the branchpoint and
+ // "1.1.1.1" will be the first commit on it.
+
+ first_entry_components = components;
+ first_entry_components.push_back("1");
+
+ branchpoint_components = components;
+ branchpoint_components.erase(branchpoint_components.end() - 1,
+ branchpoint_components.end());
+
+ }
+
+ else if (components.size() > 2 &&
+ (components.size() % 2 == 0) &&
+ components[components.size() - 2] == string("0"))
{
- string first_entry_version;
- components[components.size() - 2] = components[components.size() - 1];
- components[components.size() - 1] = string("1");
- join_version(components, first_entry_version);
+ // this is a "normal" branch
+ //
+ // such as "1.3.0.2", where "1.3" is the branchpoint and
+ // "1.3.2.1"
- L(F("first version in branch %s would be %s\n")
- % sym % first_entry_version);
- branch_first_entries.insert(make_pair(first_entry_version, sym));
+ first_entry_components = components;
+ first_entry_components[first_entry_components.size() - 2]
+ = first_entry_components[first_entry_components.size() - 1];
+ first_entry_components[first_entry_components.size() - 1]
+ = string("1");
+
+ branchpoint_components = components;
+ branchpoint_components.erase(branchpoint_components.end() - 2,
+ branchpoint_components.end());
+ }
- string branchpoint_version;
- components.erase(components.end() - 2, components.end());
- join_version(components, branchpoint_version);
+ string first_entry_version;
+ join_version(first_entry_components, first_entry_version);
+
+ L(F("first version in branch %s would be %s\n")
+ % sym % first_entry_version);
+ branch_first_entries.insert(make_pair(first_entry_version, sym));
- L(F("file branchpoint for %s at %s\n") % sym % branchpoint_version);
- branchpoints.insert(make_pair(branchpoint_version, sym));
- }
+ string branchpoint_version;
+ join_version(branchpoint_components, branchpoint_version);
+
+ L(F("file branchpoint for %s at %s\n") % sym % branchpoint_version);
+ branchpoints.insert(make_pair(branchpoint_version, sym));
}
}
@@ -782,8 +867,9 @@
if (private_branch)
{
- stk.push(stk.top());
- bstk.push(bstk.top());
+ branch = shared_ptr<cvs_branch>(new cvs_branch());
+ stk.push(branch);
+ bstk.push(branch_interner.intern(""));
return;
}
else
@@ -1010,22 +1096,25 @@
unsigned long commits_remaining = branch->lineage.size();
// step 1: sort the lineage
- sort(branch->lineage.begin(), branch->lineage.end());
+ stable_sort(branch->lineage.begin(), branch->lineage.end());
for (vector<cvs_commit>::const_iterator i = branch->lineage.begin();
i != branch->lineage.end(); ++i)
{
commits_remaining--;
- L(F("examining next commit [t:%d] [a:%d] [c:%d]\n")
- % i->time % i->author % i->changelog);
+ L(F("examining next commit [t:%d] [p:%s] [a:%s] [c:%s]\n")
+ % i->time
+ % cvs.path_interner.lookup(i->path)
+ % cvs.author_interner.lookup(i->author)
+ % cvs.changelog_interner.lookup(i->changelog));
// step 2: expire all clusters from the beginning of the set which
// have passed the window size
while (!clusters.empty())
{
cluster_set::const_iterator j = clusters.begin();
- if ((*j)->first_time + window < i->time)
+ if ((*j)->first_time + constants::cvs_window < i->time)
{
L(F("expiring cluster\n"));
cons.consume_cluster(**j, false);
@@ -1037,18 +1126,32 @@
// step 3: find the last still-live cluster to have touched this
// file
- time_t last_modify_time = 0;
+ time_t time_of_last_cluster_touching_this_file = 0;
+
+ unsigned clu = 0;
for (cluster_set::const_iterator j = clusters.begin();
j != clusters.end(); ++j)
- {
+ {
+ L(F("examining cluster %d to see if it touched %d\n")
+ % clu++
+ % i->path);
+
cvs_cluster::entry_map::const_iterator k = (*j)->entries.find(i->path);
- if (k != (*j)->entries.end() &&
- k->second.time > last_modify_time)
- last_modify_time = k->second.time;
+ if ((k != (*j)->entries.end())
+ && (k->second.time > time_of_last_cluster_touching_this_file))
+ {
+ L(F("found cluster touching %d: [t:%d] [a:%d] [c:%d]\n")
+ % i->path
+ % (*j)->first_time
+ % (*j)->author
+ % (*j)->changelog);
+ time_of_last_cluster_touching_this_file = (*j)->first_time;
+ }
}
- L(F("last modification time is %d\n") % last_modify_time);
+ L(F("last modification time is %d\n")
+ % time_of_last_cluster_touching_this_file);
- // step 4: find a cluster which starts after the
+ // step 4: find a cluster which starts on or after the
// last_modify_time, which doesn't modify the file in question,
// and which contains the same author and changelog as our
// commit
@@ -1056,12 +1159,16 @@
for (cluster_set::const_iterator j = clusters.begin();
j != clusters.end(); ++j)
{
- if (((*j)->first_time > last_modify_time)
+ if (((*j)->first_time >= time_of_last_cluster_touching_this_file)
&& ((*j)->author == i->author)
&& ((*j)->changelog == i->changelog)
&& ((*j)->entries.find(i->path) == (*j)->entries.end()))
{
- L(F("picked existing cluster target\n"));
+ L(F("picked existing cluster [t:%d] [a:%d] [c:%d]\n")
+ % (*j)->first_time
+ % (*j)->author
+ % (*j)->changelog);
+
target = (*j);
}
}
@@ -1072,7 +1179,10 @@
if (!target)
{
L(F("building new cluster [t:%d] [a:%d] [c:%d]\n")
- % i->time % i->author % i->changelog);
+ % i->time
+ % i->author
+ % i->changelog);
+
target = cluster_ptr(new cvs_cluster(i->time,
i->author,
i->changelog));
@@ -1232,7 +1342,7 @@
cvs.file_version_interner.lookup(i->second));
initial_cluster.entries.insert(make_pair(i->first, e));
}
- consume_cluster(initial_cluster, false);
+ consume_cluster(initial_cluster, branch.lineage.empty());
}
}
@@ -1263,11 +1373,6 @@
{
data tmp;
write_revision_set(*(i->rev), tmp);
- /*
- cout << "+++WRITING REVISION" << endl;
- cout << tmp << endl;
- cout << "---WRITING REVISION" << endl;
- */
app.db.put_revision(i->rid, *(i->rev));
store_auxiliary_certs(*i);
++n_revisions;
@@ -1417,7 +1522,15 @@
void
cluster_consumer::consume_cluster(cvs_cluster const & c,
bool head_p)
-{
+{
+ // we should never have an empty cluster; it's *possible* to have
+ // an empty changeset (say on a vendor import) but every cluster
+ // should have been created by at least one file commit, even
+ // if the commit made no changes. it's a logical inconsistency if
+ // you have an empty cluster.
+ I(!c.entries.empty());
+
+ L(F("BEGIN consume_cluster()\n"));
shared_ptr<revision_set> rev(new revision_set());
boost::shared_ptr<change_set> cs(new change_set());
build_change_set(c, *cs);
@@ -1431,11 +1544,11 @@
store_manifest_edge(head_p);
- L(F("consumed cluster %s (parent '%s')\n") % child_rid % rev->edges.begin()->first);
preps.push_back(prepared_revision(child_rid, rev, c));
// now apply same change set to parent_map, making parent_map == child_map
apply_change_set(*cs, parent_map);
parent_mid = child_mid;
parent_rid = child_rid;
+ L(F("END consume_cluster('%s') (parent '%s')\n") % child_rid % rev->edges.begin()->first);
}
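#
# The cluster-inference loop patched above runs four steps per commit;
# a condensed sketch (types and interfaces simplified for illustration,
# not the real ones):
#
#   // step 1: lineage was stable_sort'ed by time, so clusters expire
#   // in order as commits arrive
#   while (!clusters.empty()
#          && oldest(clusters).first_time + constants::cvs_window < c.time)
#     emit(pop_oldest(clusters));            // step 2: expire by window
#
#   time_t barrier = 0;                      // step 3: newest live cluster
#   for (cluster const & k : clusters)       // that touched this file
#     if (k.touches(c.path))
#       barrier = std::max(barrier, k.first_time);
#
#   cluster * target = 0;                    // step 4: reuse a compatible
#   for (cluster & k : clusters)             // cluster starting on or
#     if (k.first_time >= barrier            // after the barrier...
#         && k.author == c.author
#         && k.changelog == c.changelog
#         && !k.touches(c.path))
#       target = &k;
#   if (!target)                             // ...or open a new one
#     target = new_cluster(c.time, c.author, c.changelog);
#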
--- schema_migration.cc
+++ schema_migration.cc
@@ -14,6 +14,7 @@
#include <sqlite3.h>
+#include "sanity.hh"
#include "schema_migration.hh"
#include "cryptopp/filters.h"
#include "cryptopp/sha.h"
@@ -185,11 +186,17 @@
void migrate(sqlite3 *sql, string target_id)
{
string init;
- calculate_schema_id(sql, init);
if (sql == NULL)
throw runtime_error("NULL sqlite object given to migrate");
+ calculate_schema_id(sql, init);
+ if (target_id == init)
+ {
+ P(F("nothing to migrate; schema %s is up-to-date\n") % init);
+ return;
+ }
+
if (sqlite3_create_function(sql, "sha1", -1, SQLITE_UTF8, NULL,
&sqlite_sha1_fn, NULL, NULL))
throw runtime_error("error registering sha1 function with sqlite");
--- tests/t_ambiguous_tags.at
+++ tests/t_ambiguous_tags.at
@@ -0,0 +1,21 @@
+AT_SETUP([ls tags with ambiguous tags])
+MONOTONE_SETUP
+
+ADD_FILE(testfile, [blah blah
+])
+COMMIT(testbranch)
+R1=`BASE_REVISION`
+
+SET_FILE(testfile, [foo foo
+])
+COMMIT(testbranch)
+R2=`BASE_REVISION`
+
+AT_CHECK(MONOTONE tag $R1 ambig_tag, [], [ignore], [ignore])
+AT_CHECK(MONOTONE tag $R2 ambig_tag, [], [ignore], [ignore])
+
+AT_CHECK(MONOTONE ls tags, [], [stdout], [ignore])
+AT_CHECK(grep -q $R1 stdout)
+AT_CHECK(grep -q $R2 stdout)
+
+AT_CLEANUP
--- tests/t_cvsimport_drepper2.at
+++ tests/t_cvsimport_drepper2.at
@@ -10,9 +10,9 @@
411cfd008f4a72e433b48d6421733b6a792ca3b7 t/libelf-po/POTFILES.in
])
-AT_DATA(test.tags, [initial f137764bdcf393ecb68b44ed2accd3e574681fcb tester@test.net
-portable-branch-base d57a26278cb758d02ee64d1c633008bfedc4cefd tester@test.net
-portable-branch-fork-20050601T0139 d57a26278cb758d02ee64d1c633008bfedc4cefd tester@test.net
+AT_DATA(test.tags, [initial f5e868d2572c2ae5b50fd93bc8d0cb827e416e50 tester@test.net
+portable-branch-base 1455aa956c2b095d476e803b8b3d85d8e8509bf7 tester@test.net
+portable-branch-fork-20050601T0139 1455aa956c2b095d476e803b8b3d85d8e8509bf7 tester@test.net
])
AT_DATA(e.tar.gz.enc, [H4sIAEJyw0IAA+1YbW/bOBLOV+tXEOiHSw+xqhdKTuq9g4u03SuQbIumub1vAS3R
--- tests/t_kill_tag_locally.at
+++ tests/t_kill_tag_locally.at
@@ -0,0 +1,63 @@
+AT_SETUP([db kill_tag_locally])
+MONOTONE_SETUP
+
+ADD_FILE(testfile, [blah blah
+])
+COMMIT(testbranch)
+R1=`BASE_REVISION`
+
+SET_FILE(testfile, [foo foo
+])
+COMMIT(testbranch)
+R2=`BASE_REVISION`
+
+SET_FILE(testfile, [bar bar
+])
+COMMIT(testbranch)
+R3=`BASE_REVISION`
+
+SET_FILE(testfile, [baz baz
+])
+COMMIT(testbranch)
+R4=`BASE_REVISION`
+
+AT_CHECK(MONOTONE tag $R1 ambig_tag, [], [ignore], [ignore])
+AT_CHECK(MONOTONE tag $R2 ambig_tag, [], [ignore], [ignore])
+AT_CHECK(MONOTONE tag $R3 test_tag, [], [ignore], [ignore])
+AT_CHECK(MONOTONE tag $R4 other_tag, [], [ignore], [ignore])
+
+AT_CHECK(MONOTONE ls tags, [], [stdout], [ignore])
+AT_CHECK(grep -q $R1 stdout, [])
+AT_CHECK(grep -q ambig_tag stdout, [])
+AT_CHECK(grep -q $R2 stdout, [])
+AT_CHECK(grep -q ambig_tag stdout, [])
+AT_CHECK(grep -q $R3 stdout, [])
+AT_CHECK(grep -q test_tag stdout, [])
+AT_CHECK(grep -q $R4 stdout, [])
+AT_CHECK(grep -q other_tag stdout, [])
+
+AT_CHECK(MONOTONE db kill_tag_locally test_tag, [], [ignore], [ignore])
+
+AT_CHECK(MONOTONE ls tags, [], [stdout], [ignore])
+AT_CHECK(grep -q $R1 stdout, [])
+AT_CHECK(grep -q ambig_tag stdout, [])
+AT_CHECK(grep -q $R2 stdout, [])
+AT_CHECK(grep -q ambig_tag stdout, [])
+AT_CHECK(grep -q $R3 stdout, [1])
+AT_CHECK(grep -q test_tag stdout, [1])
+AT_CHECK(grep -q $R4 stdout, [])
+AT_CHECK(grep -q other_tag stdout, [])
+
+AT_CHECK(MONOTONE db kill_tag_locally ambig_tag, [], [ignore], [ignore])
+
+AT_CHECK(MONOTONE ls tags, [], [stdout], [ignore])
+AT_CHECK(grep -q $R1 stdout, [1])
+AT_CHECK(grep -q ambig_tag stdout, [1])
+AT_CHECK(grep -q $R2 stdout, [1])
+AT_CHECK(grep -q ambig_tag stdout, [1])
+AT_CHECK(grep -q $R3 stdout, [1])
+AT_CHECK(grep -q test_tag stdout, [1])
+AT_CHECK(grep -q $R4 stdout, [])
+AT_CHECK(grep -q other_tag stdout, [])
+
+AT_CLEANUP
--- testsuite.at
+++ testsuite.at
@@ -380,15 +380,13 @@
# note that NETSYNC_SERVE_START is _not_ a special case of this macro.
m4_define([NETSYNC_SERVE_N_START], [
NETSYNC_KILLHARD
-_FOO=$2
-MONOTONE --db=test$1.db --rcfile=netsync.lua --pid-file=monotone_at.pid serve localhost:$_PORT ${_FOO:-"*"} &
+MONOTONE --db=test$1.db --rcfile=netsync.lua --pid-file=monotone_at.pid serve localhost:$_PORT m4_if($2, [], "*", $2) &
sleep 4
])
# run as NETSYNC_SERVE_START(pattern)
m4_define([NETSYNC_SERVE_START], [
NETSYNC_KILLHARD
-FOO=$1
-MONOTONE --rcfile=netsync.lua --pid-file=monotone_at.pid serve localhost:$_PORT ${_FOO:-"*"} &
+MONOTONE --rcfile=netsync.lua --pid-file=monotone_at.pid serve localhost:$_PORT m4_if($1, [], "*", $1) &
sleep 4
])
# run as NETSYNC_SERVE_STOP
@@ -671,3 +669,5 @@
m4_include(tests/t_netsync_read_permissions.at)
m4_include(tests/t_netsync_exclude.at)
m4_include(tests/t_netsync_exclude_default.at)
+m4_include(tests/t_ambiguous_tags.at)
+m4_include(tests/t_kill_tag_locally.at)