#
# patch "ChangeLog"
# from [a8c8177d569d38b1854e9b99413352ae7b1950d7]
# to [48066cc8b59dc9bb063f86243abc84dddcca9ca0]
#
# patch "basic_io.cc"
# from [fb9d041c4191d1cd6997e9b4ac65adfb7361e0ef]
# to [362c354a98656867a4874ce83f15faf4c63dcaf5]
#
# patch "change_set.cc"
# from [514f646e5cc061ba8bf85e92eebd65357b4c85bf]
# to [8b733d5b674155d95c2c8dda60fa99b18acf9c11]
#
# patch "monotone.texi"
# from [38e796ddafa66c78001fb53209d8c7e0dd8c0b75]
# to [beb10855718e7f99b7f666defd1ab2de5e5e81dd]
#
# patch "packet.cc"
# from [7f4cc56b5a4fd62495fcec98bcc2e67c6ddb96a8]
# to [478a95c883062f409c2d9558dd3177e9d5e6ac2c]
#
# patch "transforms.cc"
# from [3c627fa48277dc799dd2fc2eccb8d7b9ca93688b]
# to [9acbe75b4689bf347b0d840067793155a425524b]
#
--- ChangeLog
+++ ChangeLog
@@ -1,5 +1,22 @@
+2005-04-18 Nathaniel Smith
+
+ * monotone.texi (Dealing with a Fork): Clarify (hopefully) what we
+ mean when we say that "update" is a dangerous command.
+
2005-04-17 Matt Johnston
+ * change_set.cc (confirm_proper_tree): use a std::set rather than
+ dynamic_bitset for the ancestor list, improving performance for
+ common tree structures.
+ * basic_io.cc: reserve() a string
+
+2005-04-17 Matt Johnston
+
+ * packet.cc: fix up unit test compilation.
+ * transforms.cc: fix up unit test compilation.
+
+2005-04-17 Matt Johnston
+
* vocab_terms.hh: remove commented out lines.
2005-04-17 Matt Johnston
--- basic_io.cc
+++ basic_io.cc
@@ -56,6 +56,7 @@
I(std::isalnum(*i) || *i == '_');
std::string escaped;
+ escaped.reserve(v.size() + 8);
for (std::string::const_iterator i = v.begin();
i != v.end(); ++i)
--- change_set.cc
+++ change_set.cc
@@ -525,19 +525,21 @@
size_t tid_range = max_tid - min_tid + 1;
boost::dynamic_bitset<> confirmed(tid_range);
- boost::dynamic_bitset<> ancs(tid_range);
for (path_state::const_iterator i = ps.begin(); i != ps.end(); ++i)
{
tid curr = i->first;
path_item item = i->second;
- ancs.reset();
+          std::set<tid> ancs; // a set is more efficient, at least in normal
+ // trees where the number of ancestors is
+ // significantly less than tid_range
while (confirmed.test(curr - min_tid) == false)
{
sanity_check_path_item(item);
- I(ancs.test(curr - min_tid) == false);
- ancs.set(curr - min_tid);
+ I(ancs.find(curr) == ancs.end());
+ ancs.insert(curr);
+ confirmed.set(curr - min_tid);
if (path_item_parent(item) == root_tid)
break;
else
@@ -554,7 +556,10 @@
I(path_item_type(item) == ptype_directory);
}
}
- confirmed |= ancs;
+      for (std::set<tid>::const_iterator a = ancs.begin(); a != ancs.end(); a++)
+ {
+ confirmed.set(*a - min_tid);
+ }
}
}
--- monotone.texi
+++ monotone.texi
@@ -2096,10 +2096,27 @@
(an update) and a ``post-commit'' merge. Both sorts of merge use the
exact same algorithm. The major difference concerns the recoverability
of the pre-merge state: if you commit your work first, and merge after
-committing, the merge can fail (due to difficulty in a manual merge
-step) and your committed state is still safe. It is therefore
-recommended that you commit your work @emph{first}, before merging.
+committing, then even if the merge somehow fails (due to difficulty in a
+manual merge step, for instance), your committed state is still safe.
+If you update, on the other hand, you are requesting that monotone
+directly modify your working copy, and while monotone will try hard not
+to break anything, this process is inherently more open to error. It is
+therefore recommended that you commit your work @emph{first}, before
+merging.
+If you have previously used another version control system, this may at
+first seem surprising; there are some systems where you are
+@emph{required} to update, and risk the above problems, before you can
+commit. Monotone, however, was designed with this problem in mind, and
+thus @emph{always} allows you to commit before merging. A good rule of
+thumb is to only use @command{update} in working copies with no local
+modifications, or when you actually want to work against a different
+base revision (perhaps because finishing your change turns out to
+require some fixes made in another revision, or because you discover
+that you have accidentally started working against a revision that
+contains unrelated bugs, and need to back out to a working revision for
+testing).
+
@page
@node Branching and Merging
@section Branching and Merging
--- packet.cc
+++ packet.cc
@@ -1377,13 +1377,13 @@
packet_writer pw(oss);
// an fdata packet
- file_data fdata("this is some file data");
+ file_data fdata(data("this is some file data"));
file_id fid;
calculate_ident(fdata, fid);
pw.consume_file_data(fid, fdata);
// an fdelta packet
- file_data fdata2("this is some file data which is not the same as the first one");
+ file_data fdata2(data("this is some file data which is not the same as the first one"));
file_id fid2;
calculate_ident(fdata2, fid);
delta del;
--- transforms.cc
+++ transforms.cc
@@ -823,19 +823,11 @@
{
data dat1(string("the first day of spring\nmakes me want to sing\n"));
data dat2(string("the first day of summer\nis a major bummer\n"));
- data dat3;
-  gzip<data> dat1_gz, dat2_gz, dat3_gz;
-  base64< gzip<data> > dat1_bgz, dat2_bgz, dat3_bgz;
- encode_gzip(dat1, dat1_gz);
- encode_gzip(dat2, dat2_gz);
- encode_base64(dat1_gz, dat1_bgz);
- encode_base64(dat2_gz, dat2_bgz);
-  base64< gzip<delta> > del_bgz;
- diff(dat1_bgz, dat2_bgz, del_bgz);
+ delta del;
+ diff(dat1, dat2, del);
- patch(dat1_bgz, del_bgz, dat3_bgz);
- decode_base64(dat3_bgz, dat3_gz);
- decode_gzip(dat3_gz, dat3);
+ data dat3;
+ patch(dat1, del, dat3);
BOOST_CHECK(dat3 == dat2);
}