From: code
Subject: [Monotone-commits-diffs] net.venge.monotone.string-sanitization: 2b5e1e74133523971222f64c273c583a199e9e45
Date: Thu, 24 Feb 2011 00:55:25 +0100 (CET)

revision:            2b5e1e74133523971222f64c273c583a199e9e45
date:                2011-02-23T23:49:49
author:              address@hidden
branch:              net.venge.monotone.string-sanitization
changelog:
* src/*.cc: start string sanitization; all hash / ID placeholders are
  printed without single quotes, while all other strings (branch and
  key names, file names, attributes, ...) are surrounded with single
  quotes; this probably breaks a couple of tests and is not finished
  yet, but more will come (see the sketch below)
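
[Editor's note, not part of the commit: a minimal, self-contained sketch of the quoting
convention the changelog describes, using two hypothetical helpers (fmt_id, fmt_name)
rather than monotone's actual F() formatter. Hash / ID placeholders are substituted
bare; every other user-visible string is wrapped in single quotes.]

// Sketch only: illustrates the quoting convention, not monotone's real API.
#include <iostream>
#include <string>

// A hash / ID placeholder: substituted as-is, without quotes.
static std::string fmt_id(std::string const & id)
{
  return id;
}

// Any other string (branch or key name, file name, attribute, ...):
// surrounded with single quotes.
static std::string fmt_name(std::string const & name)
{
  return "'" + name + "'";
}

int main()
{
  std::string const rev = "2b5e1e74133523971222f64c273c583a199e9e45";
  std::string const branch = "net.venge.monotone.string-sanitization";

  // the revision hash is substituted without quotes:
  std::cout << "no revision " << fmt_id(rev) << " found in database\n";

  // the branch name is substituted with single quotes around it:
  std::cout << "branch " << fmt_name(branch) << " has multiple heads\n";
  return 0;
}

[The actual changes in the diff below achieve the same effect directly, by adding or
removing single quotes inside the F() format strings.]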

manifest:
format_version "1"

new_manifest [b9a566c46de9e29a23c9fc6592adde36106c563e]

old_revision [5bb1b74d83a9c53d6e11e399de59ad7d31444bf8]

patch "src/annotate.cc"
 from [28ad5569c22bb905380ce6abbb73f6d6ad5c629b]
   to [7defae118c92570f6b8adccffd17bd913ecbf041]

patch "src/automate.cc"
 from [76c560940da4dbc61dede825e34696285ece4bc4]
   to [88bf3c76888c70149bc0397f571838dc5d2a844f]

patch "src/cert.cc"
 from [ea863a72e399354c6a859a2fe6d5185529ed8823]
   to [8d074780068206b16f302c9e5b0ab6da609f3448]

patch "src/cmd.cc"
 from [d824fb0d9171a2ab5df2a0984ddcd9e789548ec5]
   to [8e757d3b01172b53644e669d4fb7f0ec097555d1]

patch "src/cmd_automate.cc"
 from [2c5e25aeca1c3286a7f77a36f4a6a6581166177e]
   to [35afc0ad1040244533e24ad489a8ec139d0b163b]

patch "src/cmd_conflicts.cc"
 from [0c87cd90c1e90cbb312e51c2aa175acadf07bd20]
   to [fe887aac3fd84b1048360551dd8c8638d059ea7d]

patch "src/cmd_db.cc"
 from [df467ec186dc1d1ebcae83d84495009dbeba53e9]
   to [419d06e8b256dbe4d0499e0da58725ad535d5570]

patch "src/cmd_diff_log.cc"
 from [94a354875b587d372e6d0898ab6fc8355ce29c21]
   to [39f108bc1d798d8525503ea5cfa693188a378fab]

patch "src/cmd_files.cc"
 from [7e9ffd4a06daeb0ef7ab7557d3a656c6b99ab2ad]
   to [88f831d8a59a91d45039fd91825b2d81c48349dd]

patch "src/cmd_key_cert.cc"
 from [7a98615c6d189044c34f17845df337ca63905b41]
   to [cf519d7c14bbf3bc69dff0a333a7be638c3e047d]

patch "src/cmd_list.cc"
 from [eede6b49bfe267b8e9b361c12bbcc26f53e86191]
   to [e8cdc511a1a5c9f8234bb45628fcb949342cd57f]

patch "src/cmd_merging.cc"
 from [d5f35495dd5c6d7f1749c043ec84d8577519252b]
   to [98408f1a05aecbe60293c0607be8f6ae65f83940]

patch "src/cmd_netsync.cc"
 from [5b0013c5fa9dd8d362bf5c6c627099e0aaad06da]
   to [d29a3ff0c706db0f2cae9323f53079bca16fd889]

patch "src/cmd_othervcs.cc"
 from [ba717ebc34573dc41258fe4f679632876097eb48]
   to [631506764856b783b32348dc411abc5b174037f5]

patch "src/cmd_ws_commit.cc"
 from [7b91a53d8eec4e78b062092f85c945bbaaefe23a]
   to [b701f509d9afc92c35619f8b5eae31c13366e62d]

patch "src/database.cc"
 from [f61bd0d7d66a715c7a55cb943eb8521db6d91d68]
   to [9f26990e74779364be7f1a453b3653ded4844325]

patch "src/file_io.cc"
 from [6683cf8d459a89597caf1eb3ce01800fe4404643]
   to [3a9cd566f250de51fefd41368c4fc1aaa1e85896]

patch "src/key_store.cc"
 from [a9e840131b61a2a508d8e8e19464a3591d56bac4]
   to [f4355094b08083f0e10235d721abec6f89d6f890]

patch "src/keys.cc"
 from [19db9fa939a1d87aba964f4e84743adcfff6fdd2]
   to [50a2cbba3c93da0f181c2d947089e811974b44aa]

patch "src/merge_conflict.cc"
 from [13b609e15f2f3e2206fdb21d0f5ab5def44c2442]
   to [49713287fe6913386578c2956917569165ebdd84]

patch "src/merge_content.cc"
 from [620a2b95ee5b72299ee2780ec0077e25c03b8795]
   to [76d5a0997d9217b309d75806f8059d8f56f2ca49]

patch "src/migrate_ancestry.cc"
 from [7e659f3dbdf54efc68479591f24ecaef3db3edd8]
   to [7cee409c3b099ef96551cd53a1d16684108fefdb]

patch "src/migrate_schema.cc"
 from [d6161bf90f49f20afd61716ce2ab56da58567bf4]
   to [778717ded91866aaabea3752ebd5d44400ce8595]

patch "src/migrate_work.cc"
 from [2a8658dad954b975e65d3ceaf5df608007bfe841]
   to [656cfec56129df09e8f9f5b3f75ea71bfef0c6e8]

patch "src/netio.hh"
 from [34259951595744b5948e66a2b7e00a2de34e2877]
   to [05953ebcfb8b75c0357ef7866e122d4f68c3d90d]

patch "src/netsync.cc"
 from [bda5adcae7b66d8c3195a4eceae052a225219a17]
   to [19950fb58bfd69abe3783c3c5e4f0fdaa748b2dc]

patch "src/selectors.cc"
 from [5337d6519103cf04672b8abffd05103c6bf38ab8]
   to [8325a670b116d87a325075ebe75bb20dbced8de4]
============================================================
--- src/cert.cc	ea863a72e399354c6a859a2fe6d5185529ed8823
+++ src/cert.cc	8d074780068206b16f302c9e5b0ab6da609f3448
@@ -124,7 +124,7 @@ read_cert(database & db, string const & 
   id check;
   tmp.hash_code(keyname, check);
   if (!(check == hash))
-    throw bad_decode(F("calculated cert hash '%s' does not match '%s'")
+    throw bad_decode(F("calculated cert hash %s does not match %s")
                      % check % hash);
   t = tmp;
   return true;
============================================================
--- src/database.cc	f61bd0d7d66a715c7a55cb943eb8521db6d91d68
+++ src/database.cc	9f26990e74779364be7f1a453b3653ded4844325
@@ -614,7 +614,7 @@ database_impl::check_format()
       // they need to either changesetify or rosterify.  which?
       if (table_has_data("revisions"))
         E(false, origin::no_fault,
-          F("database %s contains old-style revisions\n"
+          F("database '%s' contains old-style revisions\n"
             "if you are a project leader or doing local testing:\n"
             "  see the file UPGRADE for instructions on upgrading.\n"
             "if you are not a project leader:\n"
@@ -624,7 +624,7 @@ database_impl::check_format()
           % filename);
       else
         E(false, origin::no_fault,
-          F("database %s contains manifests but no revisions\n"
+          F("database '%s' contains manifests but no revisions\n"
             "this is a very old database; it needs to be upgraded\n"
             "please see README.changesets for details")
           % filename);
@@ -646,7 +646,7 @@ database_impl::check_caches()
     }
 
   E(caches_are_filled, origin::no_fault,
-    F("database %s lacks some cached data\n"
+    F("database '%s' lacks some cached data\n"
       "run '%s db regenerate_caches' to restore use of this database")
     % filename % prog_name);
 }
@@ -2991,9 +2991,9 @@ database::put_revision(revision_id const
       if (!edge_old_revision(i).inner()().empty()
           && !revision_exists(edge_old_revision(i)))
         {
-          W(F("missing prerequisite revision '%s'")
+          W(F("missing prerequisite revision %s")
             % edge_old_revision(i));
-          W(F("dropping revision '%s'") % new_id);
+          W(F("dropping revision %s") % new_id);
           return false;
         }
 
@@ -3003,8 +3003,8 @@ database::put_revision(revision_id const
         {
           if (! file_version_exists(a->second))
             {
-              W(F("missing prerequisite file '%s'") % a->second);
-              W(F("dropping revision '%s'") % new_id);
+              W(F("missing prerequisite file %s") % a->second);
+              W(F("dropping revision %s") % new_id);
               return false;
             }
         }
@@ -3018,17 +3018,17 @@ database::put_revision(revision_id const
 
           if (! file_version_exists(delta_entry_src(d)))
             {
-              W(F("missing prerequisite file pre-delta '%s'")
+              W(F("missing prerequisite file pre-delta %s")
                 % delta_entry_src(d));
-              W(F("dropping revision '%s'") % new_id);
+              W(F("dropping revision %s") % new_id);
               return false;
             }
 
           if (! file_version_exists(delta_entry_dst(d)))
             {
-              W(F("missing prerequisite file post-delta '%s'")
+              W(F("missing prerequisite file post-delta %s")
                 % delta_entry_dst(d));
-              W(F("dropping revision '%s'") % new_id);
+              W(F("dropping revision %s") % new_id);
               return false;
             }
         }
@@ -3624,7 +3624,7 @@ database_impl::oldstyle_results_to_certs
           k_id = key_id(key_res[0][0], origin::database);
         else
           E(false, origin::database,
-            F("Your database contains multiple keys named %s") % k_name);
+            F("Your database contains multiple keys named '%s'") % k_name);
       }
 
       rsa_sha1_signature sig(res[i][4], origin::database);
@@ -3785,7 +3785,7 @@ database::put_revision_cert(cert const &
 
   if (!revision_exists(revision_id(cert.ident)))
     {
-      W(F("cert revision '%s' does not exist in db")
+      W(F("cert revision %s does not exist in db")
         % cert.ident);
       W(F("dropping cert"));
       return false;
@@ -4121,7 +4121,7 @@ namespace {
               {
                 W(F("ignoring unknown signature by '%s' on '%s'") % *u % txt);
               }
-            W(F("trust function disliked %d signers of %s cert on revision %s")
+            W(F("trust function disliked %d signers of '%s' cert on revision %s")
               % i->second.good_sigs.size()
               % get<1>(i->first)
               % get<0>(i->first));
@@ -4822,7 +4822,7 @@ database_impl::check_db_exists()
       return;
 
     case path::nonexistent:
-      E(false, origin::user, F("database %s does not exist") % filename);
+      E(false, origin::user, F("database '%s' does not exist") % filename);
 
     case path::directory:
       if (directory_is_workspace(filename))
@@ -4830,11 +4830,11 @@ database_impl::check_db_exists()
           options opts;
           workspace::get_options(filename, opts);
           E(opts.dbname.as_internal().empty(), origin::user,
-            F("%s is a workspace, not a database\n"
-              "(did you mean %s?)") % filename % opts.dbname);
+            F("'%s' is a workspace, not a database\n"
+              "(did you mean '%s'?)") % filename % opts.dbname);
         }
       E(false, origin::user,
-        F("%s is a directory, not a database") % filename);
+        F("'%s' is a directory, not a database") % filename);
     }
 }
 
@@ -4842,7 +4842,7 @@ database_impl::check_db_nonexistent()
 database_impl::check_db_nonexistent()
 {
   require_path_is_nonexistent(filename,
-                              F("database %s already exists")
+                              F("database '%s' already exists")
                               % filename);
 
   system_path journal(filename.as_internal() + "-journal", origin::internal);
============================================================
--- src/file_io.cc	6683cf8d459a89597caf1eb3ce01800fe4404643
+++ src/file_io.cc	3a9cd566f250de51fefd41368c4fc1aaa1e85896
@@ -278,12 +278,12 @@ read_data(any_path const & p, data & dat
 read_data(any_path const & p, data & dat)
 {
   require_path_is_file(p,
-                       F("file %s does not exist") % p,
-                       F("file %s cannot be read as data; it is a directory") % p);
+                       F("file '%s' does not exist") % p,
+                       F("file '%s' cannot be read as data; it is a directory") % p);
 
   ifstream file(p.as_external().c_str(),
                 ios_base::in | ios_base::binary);
-  E(file, origin::user, F("cannot open file %s for reading") % p);
+  E(file, origin::user, F("cannot open file '%s' for reading") % p);
   unfiltered_pipe->start_msg();
   file >> *unfiltered_pipe;
   unfiltered_pipe->end_msg();
============================================================
--- src/keys.cc	19db9fa939a1d87aba964f4e84743adcfff6fdd2
+++ src/keys.cc	50a2cbba3c93da0f181c2d947089e811974b44aa
@@ -40,7 +40,7 @@ load_key_pair(key_store & keys, key_id c
 load_key_pair(key_store & keys, key_id const & id)
 {
   E(keys.key_pair_exists(id), origin::user,
-    F("no key pair '%s' found in key store '%s'")
+    F("no key pair %s found in key store '%s'")
     % id % keys.get_key_dir());
 }
 
@@ -85,7 +85,7 @@ namespace {
             db.get_key(chosen_key, pub_key);
             E(keys_match(name, pub_key, name, priv_key.pub),
               origin::no_fault,
-              F("The key '%s' stored in your database does\n"
+              F("The key %s stored in your database does\n"
                 "not match the version in your local key store!")
               % chosen_key);
           }
============================================================
--- src/migrate_schema.cc	d6161bf90f49f20afd61716ce2ab56da58567bf4
+++ src/migrate_schema.cc	778717ded91866aaabea3752ebd5d44400ce8595
@@ -576,7 +576,7 @@ migrate_to_external_privkeys(sqlite3 * d
           pub = base64<rsa_pub_key>(stmt.column_string(2),
                                     origin::database);
 
-        P(F("moving key '%s' from database to %s")
+        P(F("moving key '%s' from database to '%s'")
           % ident % keys.get_key_dir());
         keys.migrate_old_key_pair(ident,
                                   decode_base64(old_priv),
@@ -1157,16 +1157,16 @@ diagnose_unrecognized_schema(schema_mism
                              system_path const & filename)
 {
   E(cat != SCHEMA_EMPTY, origin::user,
-    F("cannot use the empty sqlite database %s\n"
+    F("cannot use the empty sqlite database '%s'\n"
       "(monotone databases must be created with '%s db init')")
     % filename % prog_name);
 
   E(cat != SCHEMA_NOT_MONOTONE, origin::user,
-    F("%s does not appear to be a monotone database\n")
+    F("'%s' does not appear to be a monotone database\n")
     % filename);
 
   E(cat != SCHEMA_TOO_NEW, origin::user,
-    F("%s appears to be a monotone database, but this version of\n"
+    F("'%s' appears to be a monotone database, but this version of\n"
       "monotone does not recognize its schema.\n"
       "you probably need a newer version of monotone.")
     % filename);
@@ -1185,7 +1185,7 @@ check_sql_schema(sqlite3 * db, system_pa
   diagnose_unrecognized_schema(cat, filename);
 
   E(cat != SCHEMA_MIGRATION_NEEDED, origin::user,
-    F("database %s is laid out according to an old schema\n"
+    F("database '%s' is laid out according to an old schema\n"
       "try '%s db migrate' to upgrade\n"
       "(this is irreversible; you may want to make a backup copy first)")
     % filename % prog_name);
@@ -1338,7 +1338,7 @@ test_migration_step(sqlite3 * db, key_st
   E(m->migrator_sql || m->migrator_func, origin::user,
     F("schema %s is up to date") % schema);
 
-  L(FL("testing migration from %s to %s\n in database %s")
+  L(FL("testing migration from %s to %s\n in database '%s'")
     % schema % m[1].id % filename);
 
   if (m->migrator_sql)
============================================================
--- src/netsync.cc	bda5adcae7b66d8c3195a4eceae052a225219a17
+++ src/netsync.cc	19950fb58bfd69abe3783c3c5e4f0fdaa748b2dc
@@ -109,9 +109,9 @@ call_server(app_state & app,
   Netxx::Timeout timeout(static_cast<long>(constants::netsync_timeout_seconds)),
     instant(0,1);
 
-  P(F("connecting to %s") % info->client.get_uri().resource());
-  P(F("  include pattern  %s") % info->client.get_include_pattern());
-  P(F("  exclude pattern  %s") % info->client.get_exclude_pattern());
+  P(F("connecting to '%s'") % info->client.get_uri().resource());
+  P(F("  include pattern  '%s'") % info->client.get_include_pattern());
+  P(F("  exclude pattern  '%s'") % info->client.get_exclude_pattern());
 
   shared_ptr<Netxx::StreamBase> server
     = build_stream_to_server(app.opts, app.lua, info, timeout);
@@ -160,7 +160,7 @@ call_server(app_state & app,
           // error from our server (which is translated to a decode
           // exception). We call these cases E() errors.
           E(false, origin::network,
-            F("processing failure while talking to peer %s, disconnecting")
+            F("processing failure while talking to peer '%s', disconnecting")
             % sess->get_peer());
           return;
         }
@@ -168,7 +168,7 @@ call_server(app_state & app,
       bool io_ok = react.do_io();
 
       E(io_ok, origin::network,
-        F("timed out waiting for I/O with peer %s, disconnecting")
+        F("timed out waiting for I/O with peer '%s', disconnecting")
         % sess->get_peer());
 
       if (react.size() == 0)
@@ -185,19 +185,19 @@ call_server(app_state & app,
 
           if (sess->protocol_state == session_base::confirmed_state)
             {
-              P(F("successful exchange with %s")
+              P(F("successful exchange with '%s'")
                 % sess->get_peer());
               return;
             }
           else if (sess->encountered_error)
             {
-              P(F("peer %s disconnected after we informed them of error")
+              P(F("peer '%s' disconnected after we informed them of error")
                 % sess->get_peer());
               return;
             }
           else
             E(false, origin::network,
-              (F("I/O failure while talking to peer %s, disconnecting")
+              (F("I/O failure while talking to peer '%s', disconnecting")
                % sess->get_peer()));
         }
     }
@@ -216,7 +216,7 @@ session_from_server_sync_item(app_state 
 
   try
     {
-      P(F("connecting to %s") % info->client.get_uri().resource());
+      P(F("connecting to '%s'") % info->client.get_uri().resource());
       shared_ptr<Netxx::StreamBase> server
         = build_stream_to_server(app.opts, app.lua, info,
                                  Netxx::Timeout(constants::netsync_timeout_seconds));
@@ -357,7 +357,7 @@ serve_single_connection(project_t & proj
                         shared_ptr<session> sess)
 {
   sess->begin_service();
-  P(F("beginning service on %s") % sess->get_peer());
+  P(F("beginning service on '%s'") % sess->get_peer());
 
   transaction_guard guard(project.db);
 
============================================================
--- src/netio.hh	34259951595744b5948e66a2b7e00a2de34e2877
+++ src/netio.hh	05953ebcfb8b75c0357ef7866e122d4f68c3d90d
@@ -33,7 +33,7 @@ require_bytes(std::string const & str,
   if (len == 0)
     return;
   if (str.size() < pos + len)
-    throw bad_decode(F("need %d bytes to decode %s at %d, only have %d")
+    throw bad_decode(F("need %d bytes to decode '%s' at %d, only have %d")
                      % len % name % pos % (str.size() - pos));
 }
 
@@ -51,7 +51,7 @@ require_bytes(string_queue const & str,
   if (len == 0)
     return;
   if (str.size() < pos + len)
-    throw bad_decode(F("need %d bytes to decode %s at %d, only have %d")
+    throw bad_decode(F("need %d bytes to decode '%s' at %d, only have %d")
                      % len % name % pos % (str.size() - pos));
 }
 
@@ -325,7 +325,7 @@ assert_end_of_buffer(std::string const &
                      std::string const & name)
 {
   if (str.size() != pos)
-    throw bad_decode(F("expected %s to end at %d, have %d bytes")
+    throw bad_decode(F("expected '%s' to end at %d, have %d bytes")
                      % name % pos % str.size());
 }
 
============================================================
--- src/automate.cc	76c560940da4dbc61dede825e34696285ece4bc4
+++ src/automate.cc	88bf3c76888c70149bc0397f571838dc5d2a844f
@@ -129,7 +129,7 @@ CMD_AUTOMATE(ancestors, N_("REV1 [REV2 [
     {
       revision_id rid(decode_hexenc_as<revision_id>((*i)(), origin::user));
       E(db.revision_exists(rid), origin::user,
-        F("no such revision '%s'") % rid);
+        F("no revision %s found in database") % rid);
       frontier.push_back(rid);
     }
   while (!frontier.empty())
@@ -182,7 +182,7 @@ CMD_AUTOMATE(descendents, N_("REV1 [REV2
     {
       revision_id rid(decode_hexenc_as<revision_id>((*i)(), origin::user));
       E(db.revision_exists(rid), origin::user,
-        F("no such revision '%s'") % rid);
+        F("no revision %s found in database") % rid);
       frontier.push_back(rid);
     }
   while (!frontier.empty())
@@ -232,7 +232,7 @@ CMD_AUTOMATE(erase_ancestors, N_("[REV1 
     {
       revision_id rid(decode_hexenc_as<revision_id>((*i)(), origin::user));
       E(db.revision_exists(rid), origin::user,
-        F("no such revision '%s'") % rid);
+        F("no revision %s found in database") % rid);
       revs.insert(rid);
     }
   erase_ancestors(db, revs);
@@ -262,7 +262,7 @@ CMD_AUTOMATE(toposort, N_("[REV1 [REV2 [
     {
       revision_id rid(decode_hexenc_as<revision_id>((*i)(), origin::user));
       E(db.revision_exists(rid), origin::user,
-        F("no such revision '%s'") % rid);
+        F("no revision %s found in database") % rid);
       revs.insert(rid);
     }
   vector<revision_id> sorted;
@@ -304,12 +304,12 @@ CMD_AUTOMATE(ancestry_difference, N_("NE
   args_vector::const_iterator i = args.begin();
   a = decode_hexenc_as<revision_id>((*i)(), origin::user);
   E(db.revision_exists(a), origin::user,
-    F("no such revision '%s'") % a);
+    F("no revision %s found in database") % a);
   for (++i; i != args.end(); ++i)
     {
       revision_id b(decode_hexenc_as<revision_id>((*i)(), origin::user));
       E(db.revision_exists(b), origin::user,
-        F("no such revision '%s'") % b);
+        F("no revision %s found in database") % b);
       bs.insert(b);
     }
   set<revision_id> ancestors;
@@ -403,7 +403,7 @@ CMD_AUTOMATE(parents, N_("REV"),
 
   revision_id rid(decode_hexenc_as<revision_id>(idx(args, 0)(), origin::user));
   E(db.revision_exists(rid), origin::user,
-    F("no such revision '%s'") % rid);
+    F("no revision %s found in database") % rid);
   set<revision_id> parents;
   db.get_revision_parents(rid, parents);
   for (set<revision_id>::const_iterator i = parents.begin();
@@ -434,7 +434,7 @@ CMD_AUTOMATE(children, N_("REV"),
 
   revision_id rid(decode_hexenc_as<revision_id>(idx(args, 0)(), origin::user));
   E(db.revision_exists(rid), origin::user,
-    F("no such revision '%s'") % rid);
+    F("no revision %s found in database") % rid);
   set<revision_id> children;
   db.get_revision_children(rid, children);
   for (set<revision_id>::const_iterator i = children.begin();
@@ -1599,7 +1599,7 @@ CMD_AUTOMATE(packet_for_rdata, N_("REVID
   revision_data r_data;
 
   E(db.revision_exists(r_id), origin::user,
-    F("no such revision '%s'") % r_id);
+    F("no revision %s found in database") % r_id);
   db.get_revision(r_id, r_data);
   pw.consume_revision_data(r_id, r_data);
 }
@@ -1631,7 +1631,7 @@ CMD_AUTOMATE(packets_for_certs, N_("REVI
   vector<cert> certs;
 
   E(db.revision_exists(r_id), origin::user,
-    F("no such revision '%s'") % r_id);
+    F("no revision %s found in database") % r_id);
   project.get_revision_certs(r_id, certs);
 
   for (vector<cert>::const_iterator i = certs.begin();
@@ -1698,9 +1698,9 @@ CMD_AUTOMATE(packet_for_fdelta, N_("OLD_
   file_data f_old_data, f_new_data;
 
   E(db.file_version_exists(f_old_id), origin::user,
-    F("no such revision '%s'") % f_old_id);
+    F("no revision %s found in database") % f_old_id);
   E(db.file_version_exists(f_new_id), origin::user,
-    F("no such revision '%s'") % f_new_id);
+    F("no revision %s found in database") % f_new_id);
   db.get_file_version(f_old_id, f_old_data);
   db.get_file_version(f_new_id, f_new_data);
   delta del;
@@ -1736,7 +1736,7 @@ CMD_AUTOMATE(common_ancestors, N_("REV1 
     {
       revision_id rid(decode_hexenc_as<revision_id>((*i)(), origin::user));
       E(db.revision_exists(rid), origin::user,
-        F("No such revision %s") % rid);
+        F("no revision %s found in database") % rid);
       revs.insert(rid);
     }
 
@@ -1948,7 +1948,7 @@ CMD_AUTOMATE(get_content_changed, N_("RE
 
   file_path path = file_path_external(idx(args,1));
   E(new_roster.has_node(path), origin::user,
-    F("file %s is unknown for revision %s")
+    F("file '%s' is unknown for revision %s")
     % path % ident);
 
   const_node_t node = new_roster.get_node(path);
@@ -2015,7 +2015,7 @@ CMD_AUTOMATE(get_corresponding_path, N_(
 
   file_path path = file_path_external(idx(args,1));
   E(new_roster.has_node(path), origin::user,
-    F("file %s is unknown for revision %s") % path % ident);
+    F("file '%s' is unknown for revision %s") % path % ident);
 
   const_node_t node = new_roster.get_node(path);
   basic_io::printer prt;
@@ -2172,7 +2172,7 @@ CMD_AUTOMATE(cert, N_("REVISION-ID NAME 
   hexenc<id> hrid(idx(args, 0)(), origin::user);
   revision_id rid(decode_hexenc_as<revision_id>(hrid(), origin::user));
   E(db.revision_exists(rid), origin::user,
-    F("no such revision '%s'") % hrid);
+    F("no revision %s found in database") % hrid);
 
   cache_user_key(app.opts, project, keys, app.lua);
 
@@ -2310,7 +2310,7 @@ CMD_AUTOMATE(drop_db_variables, N_("DOMA
       var_name name = typecast_vocab<var_name>(idx(args, 1));
       var_key  key(domain, name);
       E(db.var_exists(key), origin::user,
-        F("no var with name %s in domain %s") % name % domain);
+        F("no var with name '%s' in domain '%s'") % name % domain);
       db.clear_var(key);
     }
   else
@@ -2330,7 +2330,7 @@ CMD_AUTOMATE(drop_db_variables, N_("DOMA
         }
 
       E(found_something, origin::user,
-        F("no variables found in domain %s") % domain);
+        F("no variables found in domain '%s'") % domain);
     }
 }
 
============================================================
--- src/annotate.cc	28ad5569c22bb905380ce6abbb73f6d6ad5c629b
+++ src/annotate.cc	7defae118c92570f6b8adccffd17bd913ecbf041
@@ -426,9 +426,9 @@ annotate_context::build_revisions_to_ann
       // for both reasons we change the output slightly so that no unwanted
       // spaces pop up
       if (!author.empty() && !date.empty())
-        result = (F("%s.. by %s %s: ") % hex_rev_str.substr(0, 8) % author % date).str();
+        result = (F("%s.. by '%s' %s: ") % hex_rev_str.substr(0, 8) % author % date).str();
       else if (!author.empty())
-        result = (F("%s.. by %s: ") % hex_rev_str.substr(0, 8) % author).str();
+        result = (F("%s.. by '%s': ") % hex_rev_str.substr(0, 8) % author).str();
       else if (!date.empty())
         result = (F("%s.. %s: ") % hex_rev_str.substr(0, 8) % date).str();
       else
============================================================
--- src/selectors.cc	5337d6519103cf04672b8abffd05103c6bf38ab8
+++ src/selectors.cc	8325a670b116d87a325075ebe75bb20dbced8de4
@@ -901,7 +901,7 @@ complete(options const & opts, lua_hooks
     {
       completions.insert(isel->get_assuming_full_length());
       E(project.db.revision_exists(*completions.begin()), origin::user,
-        F("no such revision '%s'") % *completions.begin());
+        F("no revision %s found in database") % *completions.begin());
       return;
     }
 
@@ -919,7 +919,7 @@ complete(options const & opts, lua_hooks
       // This may be impossible, but let's make sure.
       // All the callers used to do it.
       E(project.db.revision_exists(*i), origin::user,
-        F("no such revision '%s'") % *i);
+        F("no revision %s found in database") % *i);
     }
 }
 
============================================================
--- src/key_store.cc	a9e840131b61a2a508d8e8e19464a3591d56bac4
+++ src/key_store.cc	f4355094b08083f0e10235d721abec6f89d6f890
@@ -157,21 +157,21 @@ namespace
       key_hash_code(name, kp.pub, ident);
       E(kss.put_key_pair_memory(full_key_info(ident, key_info(name, kp))),
         origin::system,
-        F("Key store has multiple copies of the key with id '%s'.") % ident);
+        F("Key store has multiple copies of the key with id %s.") % ident);
 
-      L(FL("successfully read key pair '%s' from key store") % ident);
+      L(FL("successfully read key pair %s from key store") % ident);
     }
 
     // for backward compatibility
     virtual void consume_old_private_key(key_name const & ident,
                                          old_arc4_rsa_priv_key const & k)
     {
-      W(F("converting old-format private key '%s'") % ident);
+      W(F("converting old-format private key %s") % ident);
 
       rsa_pub_key dummy;
       kss.migrate_old_key_pair(ident, k, dummy);
 
-      L(FL("successfully read key pair '%s' from key store") % ident);
+      L(FL("successfully read key pair %s from key store") % ident);
     }
   };
 }
@@ -224,7 +224,7 @@ key_store_state::maybe_read_key_dir()
       read_data(*i, dat);
       istringstream is(dat());
       if (read_packets(is, kr) == 0)
-        W(F("ignored invalid key file ('%s') in key store") % (*i) );
+        W(F("ignored invalid key file '%s' in key store") % (*i) );
     }
 }
 
@@ -439,7 +439,7 @@ struct key_delete_validator : public pac
      key_id ident;
      key_hash_code(name, kp.pub, ident);
      E(ident == expected_ident, origin::user,
-       F("expected key with id '%s' in key file '%s', got key with id '%s'")
+       F("expected key with id %s in key file '%s', got key with id %s")
          % expected_ident % file % ident);
   }
   virtual void consume_old_private_key(key_name const & ident,
@@ -564,7 +564,7 @@ key_store_state::decrypt_private_key(key
   keypair kp;
   key_name name;
   E(maybe_get_key_pair(id, name, kp), origin::user,
-    F("no key pair '%s' found in key store '%s'") % id % key_dir);
+    F("no key pair %s found in key store '%s'") % id % key_dir);
 
   L(FL("%d-byte private key") % kp.priv().size());
 
@@ -730,11 +730,11 @@ key_store::create_key_pair(database & db
   // and save it.
   if (create_mode == create_verbose)
     {
-      P(F("storing key-pair '%s' in %s/") % ident % get_key_dir());
+      P(F("storing key-pair '%s' in '%s/'") % ident % get_key_dir());
     }
   else
     {
-      L(FL("storing key-pair '%s' in %s/") % ident % get_key_dir());
+      L(FL("storing key-pair '%s' in '%s/'") % ident % get_key_dir());
     }
   put_key_pair(ident, kp);
 
@@ -743,11 +743,11 @@ key_store::create_key_pair(database & db
       guard.acquire();
       if (create_mode == create_verbose)
         {
-          P(F("storing public key '%s' in %s") % ident % db.get_filename());
+          P(F("storing public key '%s' in '%s'") % ident % db.get_filename());
         }
       else
         {
-          L(FL("storing public key '%s' in %s") % ident % db.get_filename());
+          L(FL("storing public key '%s' in '%s'") % ident % db.get_filename());
         }
       db.put_key(ident, kp.pub);
       guard.commit();
@@ -960,7 +960,7 @@ key_store::add_key_to_agent(key_id const
 {
   ssh_agent & agent = s->get_agent();
   E(agent.connected(), origin::user,
-    F("no ssh-agent is available, cannot add key '%s'") % id);
+    F("no ssh-agent is available, cannot add key %s") % id);
 
   shared_ptr<RSA_PrivateKey> priv = s->decrypt_private_key(id);
 
@@ -1094,7 +1094,7 @@ key_store_state::migrate_old_key_pair
   // matches what we derived from the private key entry, but don't abort the
   // whole migration if it doesn't.
   if (!pub().empty() && !keys_match(id, pub, id, kp.pub))
-    W(F("public and private keys for %s don't match") % id);
+    W(F("public and private keys for %s do not match") % id);
 
   key_id hash;
   key_hash_code(id, kp.pub, hash);
============================================================
--- src/merge_content.cc	620a2b95ee5b72299ee2780ec0077e25c03b8795
+++ src/merge_content.cc	76d5a0997d9217b309d75806f8059d8f56f2ca49
@@ -273,7 +273,7 @@ content_merge_workspace_adaptor::get_ver
       read_data(i->second, tmp);
       calculate_ident(file_data(tmp), fid);
       E(fid == ident, origin::system,
-        F("file %s in workspace has id %s, wanted %s")
+        F("file '%s' in workspace has id %s, wanted %s")
         % i->second
         % fid
         % ident);
============================================================
--- src/cmd_automate.cc	2c5e25aeca1c3286a7f77a36f4a6a6581166177e
+++ src/cmd_automate.cc	35afc0ad1040244533e24ad489a8ec139d0b163b
@@ -302,7 +302,7 @@ LUAEXT(change_workspace, )
     }
   else
     {
-      i18n_format msg(F("directory %s is not a workspace") % ws);
+      i18n_format msg(F("directory '%s' is not a workspace") % ws);
       lua_pushboolean(LS, false);
       lua_pushlstring(LS, msg.str().data(), msg.str().size());
       return 2;
============================================================
--- src/cmd_db.cc	df467ec186dc1d1ebcae83d84495009dbeba53e9
+++ src/cmd_db.cc	419d06e8b256dbe4d0499e0da58725ad535d5570
@@ -404,7 +404,7 @@ CMD(db_set_epoch, "set_epoch", "", CMD_R
     throw usage(execid);
 
   E(idx(args, 1)().size() == constants::epochlen, origin::user,
-    F("The epoch must be %s characters") % constants::epochlen);
+    F("The epoch must be %d characters") % constants::epochlen);
 
   epoch_data ed(decode_hexenc_as<epoch_data>(idx(args, 1)(), origin::user));
   database db(app);
@@ -447,7 +447,7 @@ CMD(unset, "unset", "", CMD_REF(variable
 
   database db(app);
   E(db.var_exists(k), origin::user,
-    F("no var with name %s in domain %s") % n % d);
+    F("no var with name '%s' in domain '%s'") % n % d);
   db.clear_var(k);
 }
 
@@ -618,7 +618,7 @@ CMD_HIDDEN(rev_height, "rev_height", "",
   revision_id rid(decode_hexenc_as<revision_id>(idx(args, 0)(), origin::user));
   database db(app);
   E(db.revision_exists(rid), origin::user,
-    F("no such revision '%s'") % rid);
+    F("no revision %s found in database") % rid);
   rev_height height;
   db.get_rev_height(rid, height);
   P(F("cached height: %s") % height);
============================================================
--- src/cmd_diff_log.cc	94a354875b587d372e6d0898ab6fc8355ce29c21
+++ src/cmd_diff_log.cc	39f108bc1d798d8525503ea5cfa693188a378fab
@@ -667,7 +667,7 @@ log_common (app_state & app,
         {
           revision_id rid = edge_old_revision(i);
           E(db.revision_exists(rid), origin::user,
-            F("workspace parent revision '%s' not found - "
+            F("workspace parent revision %s not found - "
               "did you specify a wrong database?") % rid);
           starting_revs.insert(rid);
           if (i == rev.edges.begin())
============================================================
--- src/cmd_files.cc	7e9ffd4a06daeb0ef7ab7557d3a656c6b99ab2ad
+++ src/cmd_files.cc	88f831d8a59a91d45039fd91825b2d81c48349dd
@@ -187,11 +187,11 @@ CMD(annotate, "annotate", "", CMD_REF(in
 
   // find the version of the file requested
   E(roster.has_node(file), origin::user,
-    F("no such file '%s' in revision '%s'")
+    F("no such file '%s' in revision %s")
       % file % rid);
   const_node_t node = roster.get_node(file);
   E(is_file_t(node), origin::user,
-    F("'%s' in revision '%s' is not a file")
+    F("'%s' in revision %s is not a file")
       % file % rid);
 
   const_file_t file_node = downcast_to_file_t(node);
@@ -267,7 +267,7 @@ dump_file(database & db, std::ostream & 
 dump_file(database & db, std::ostream & output, revision_id rid, utf8 filename)
 {
   E(db.revision_exists(rid), origin::user,
-    F("no such revision '%s'") % rid);
+    F("no revision %s found in database") % rid);
 
   // Paths are interpreted as standard external ones when we're in a
   // workspace, but as project-rooted external ones otherwise.
@@ -277,11 +277,11 @@ dump_file(database & db, std::ostream & 
   marking_map marks;
   db.get_roster(rid, roster, marks);
   E(roster.has_node(fp), origin::user,
-    F("no file '%s' found in revision '%s'") % fp % rid);
+    F("no file '%s' found in revision %s") % fp % rid);
 
   const_node_t node = roster.get_node(fp);
   E((!null_node(node->self) && is_file_t(node)), origin::user,
-    F("no file '%s' found in revision '%s'") % fp % rid);
+    F("no file '%s' found in revision %s") % fp % rid);
 
   const_file_t file_node = downcast_to_file_t(node);
   dump_file(db, output, file_node->content);
============================================================
--- src/cmd_key_cert.cc	7a98615c6d189044c34f17845df337ca63905b41
+++ src/cmd_key_cert.cc	cf519d7c14bbf3bc69dff0a333a7be638c3e047d
@@ -140,7 +140,7 @@ dropkey_common(app_state & app,
       transaction_guard guard(db);
       if (db.public_key_exists(identity.id))
         {
-          P(F("dropping public key '%s' from database") % identity.id);
+          P(F("dropping public key %s from database") % identity.id);
           db.delete_public_key(identity.id);
           key_deleted = true;
         }
@@ -150,7 +150,7 @@ dropkey_common(app_state & app,
 
   if (drop_private && keys.key_pair_exists(identity.id))
     {
-      P(F("dropping key pair '%s' from keystore") % identity.id);
+      P(F("dropping key pair %s from keystore") % identity.id);
       keys.delete_key(identity.id);
       key_deleted = true;
     }
============================================================
--- src/cmd_list.cc	eede6b49bfe267b8e9b361c12bbcc26f53e86191
+++ src/cmd_list.cc	e8cdc511a1a5c9f8234bb45628fcb949342cd57f
@@ -126,7 +126,7 @@ CMD(certs, "certs", "", CMD_REF(list), "
       {
         if (checked.find(idx(certs, i).key) == checked.end() &&
             !db.public_key_exists(idx(certs, i).key))
-          P(F("no public key '%s' found in database")
+          P(F("no public key %s found in database")
             % idx(certs, i).key);
         checked.insert(idx(certs, i).key);
       }
@@ -445,7 +445,7 @@ CMD(keys, "keys", "", CMD_REF(list), "[P
         }
       if (have_keystore_only_key)
         {
-          cout << (F("(*) - only in %s/")
+          cout << (F("(*) - only in '%s/'")
                    % keys.get_key_dir()) << '\n';
         }
       cout << "\n";
@@ -524,7 +524,7 @@ CMD(epochs, "epochs", "", CMD_REF(list),
         {
           map<branch_name, epoch_data>::const_iterator j =
             epochs.find(typecast_vocab<branch_name>((*i)));
-          E(j != epochs.end(), origin::user, F("no epoch for branch %s") % *i);
+          E(j != epochs.end(), origin::user, F("no epoch for branch '%s'") % *i);
           cout << encode_hexenc(j->second.inner()(),
                                 j->second.inner().made_from)
                << ' ' << j->first << '\n';
@@ -656,7 +656,7 @@ print_workspace_info(database & db, lua_
       if (!workspace_opts.branch_given)
         workspace_branch = _("<no branch set>");
 
-      out << indent << F("%s (in %s)") % workspace_branch % workspace_path << '\n';
+      out << indent << F("%s (in '%s')") % workspace_branch % workspace_path << '\n';
     }
 
     if (!has_valid_workspaces)
@@ -1031,7 +1031,7 @@ CMD_AUTOMATE(certs, N_("REV"),
   revision_id rid(decode_hexenc_as<revision_id>(hrid(), origin::user));
 
   E(db.revision_exists(rid), origin::user,
-    F("no such revision '%s'") % hrid);
+    F("no revision %s found in database") % hrid);
 
   vector<cert> ts;
   // FIXME_PROJECTS: after projects are implemented,
@@ -1047,7 +1047,7 @@ CMD_AUTOMATE(certs, N_("REV"),
       {
         if (checked.find(idx(certs, i).key) == checked.end() &&
             !db.public_key_exists(idx(certs, i).key))
-          W(F("no public key '%s' found in database")
+          W(F("no public key %s found in database")
             % idx(certs, i).key);
         checked.insert(idx(certs, i).key);
       }
============================================================
--- src/cmd_merging.cc	d5f35495dd5c6d7f1749c043ec84d8577519252b
+++ src/cmd_merging.cc	98408f1a05aecbe60293c0607be8f6ae65f83940
@@ -269,7 +269,7 @@ update(app_state & app,
   // wants.
   bool switched_branch = pick_branch_for_update(app.opts, db, project, chosen_rid);
   if (switched_branch)
-    P(F("switching to branch %s") % app.opts.branch());
+    P(F("switching to branch '%s'") % app.opts.branch());
 
   // Okay, we have a target, we have a branch, let's do this merge!
 
@@ -352,7 +352,7 @@ update(app_state & app,
   work.set_options(app.opts, app.lua, true);
 
   if (switched_branch)
-    P(F("switched branch; next commit will use branch %s") % app.opts.branch());
+    P(F("switched branch; next commit will use branch '%s'") % app.opts.branch());
   P(F("updated to base revision %s") % chosen_rid);
 }
 
@@ -700,7 +700,7 @@ void perform_merge_into_dir(app_state & 
             pth.dirname_basename(dir, base);
 
             E(right_roster.has_node(dir), origin::user,
-              F("Path %s not found in destination tree.") % pth);
+              F("Path '%s' not found in destination tree.") % pth);
             const_node_t parent = right_roster.get_node(dir);
             moved_root->parent = parent->self;
             moved_root->name = base;
============================================================
--- src/cmd_netsync.cc	5b0013c5fa9dd8d362bf5c6c627099e0aaad06da
+++ src/cmd_netsync.cc	d29a3ff0c706db0f2cae9323f53079bca16fd889
@@ -328,7 +328,7 @@ print_dryrun_info_cmd(protocol_role role
       for (map<branch_name, int>::iterator i = branch_counts.begin();
            i != branch_counts.end(); ++i)
         {
-          std::cout << (F("%9d in branch %s\n") % i->second % i->first);
+          std::cout << (F("%9d in branch '%s'\n") % i->second % i->first);
         }
     }
 }
@@ -866,12 +866,12 @@ CMD_NO_WORKSPACE(clone, "clone", "", CMD
         F("branch '%s' is empty") % app.opts.branch);
       if (heads.size() > 1)
         {
-          P(F("branch %s has multiple heads:") % app.opts.branch);
+          P(F("branch '%s' has multiple heads:") % app.opts.branch);
           for (set<revision_id>::const_iterator i = heads.begin(); i != heads.end(); ++i)
             P(i18n_format("  %s")
               % describe_revision(app.opts, app.lua, project, *i));
           P(F("choose one with '%s clone -r<id> URL'") % prog_name);
-          E(false, origin::user, F("branch %s has multiple heads") % app.opts.branch);
+          E(false, origin::user, F("branch '%s' has multiple heads") % app.opts.branch);
         }
       ident = *(heads.begin());
     }
@@ -882,7 +882,7 @@ CMD_NO_WORKSPACE(clone, "clone", "", CMD
 
       E(project.revision_is_in_branch(ident, app.opts.branch),
         origin::user,
-        F("revision %s is not a member of branch %s")
+        F("revision %s is not a member of branch '%s'")
           % ident % app.opts.branch);
     }
 
============================================================
--- src/cmd_othervcs.cc	ba717ebc34573dc41258fe4f679632876097eb48
+++ src/cmd_othervcs.cc	631506764856b783b32348dc411abc5b174037f5
@@ -57,7 +57,7 @@ CMD(cvs_import, "cvs_import", "", CMD_RE
 
   system_path cvsroot(idx(args, 0)(), origin::user);
   require_path_is_directory(cvsroot,
-                            F("path %s does not exist") % cvsroot,
+                            F("path '%s' does not exist") % cvsroot,
                             F("'%s' is not a directory") % cvsroot);
 
   // make sure we can sign certs using the selected key; also requests
============================================================
--- src/cmd_ws_commit.cc	7b91a53d8eec4e78b062092f85c945bbaaefe23a
+++ src/cmd_ws_commit.cc	b701f509d9afc92c35619f8b5eae31c13366e62d
@@ -482,7 +482,7 @@ revert(app_state & app,
                 % f->content);
 
               E(db.file_version_exists(f->content), origin::user,
-                F("no file version %s found in database for %s")
+                F("no file version %s found in database for '%s'")
                 % f->content % path);
 
               file_data dat;
@@ -496,7 +496,7 @@ revert(app_state & app,
         {
           if (!directory_exists(path))
             {
-              P(F("recreating %s/") % path);
+              P(F("recreating '%s/'") % path);
               mkdir_p(path);
             }
           else
@@ -732,7 +732,7 @@ CMD(mkdir, "mkdir", "", CMD_REF(workspac
       // project with a mkdir statement, but one never can tell...
       E(app.opts.no_ignore || !work.ignore_file(fp),
         origin::user,
-        F("ignoring directory '%s' [see .mtn-ignore]") % fp);
+        F("ignoring directory '%s' (see .mtn-ignore)") % fp);
 
       paths.insert(fp);
     }
@@ -859,7 +859,7 @@ CMD(rename, "rename", "mv", CMD_REF(work
   if (src_paths.size() == 1 && dstr()[dstr().size() -1] == '/')
     if (get_path_status(*src_paths.begin()) != path::directory)
       E(get_path_status(dst_path) == path::directory, origin::user,
-        F(_("The specified target directory %s/ doesn't exist.")) % dst_path);
+        F(_("The specified target directory '%s/' doesn't exist.")) % dst_path);
 
   work.perform_rename(db, src_paths, dst_path, app.opts.bookkeep_only);
 }
@@ -1030,13 +1030,13 @@ checkout_common(app_state & app,
         F("branch '%s' is empty") % app.opts.branch);
       if (heads.size() > 1)
         {
-          P(F("branch %s has multiple heads:") % app.opts.branch);
+          P(F("branch '%s' has multiple heads:") % app.opts.branch);
           for (set<revision_id>::const_iterator i = heads.begin(); i != heads.end(); ++i)
             P(i18n_format("  %s")
               % describe_revision(app.opts, app.lua, project, *i));
           P(F("choose one with '%s checkout -r<id>'") % prog_name);
           E(false, origin::user,
-            F("branch %s has multiple heads") % app.opts.branch);
+            F("branch '%s' has multiple heads") % app.opts.branch);
         }
       revid = *(heads.begin());
     }
@@ -1893,7 +1893,7 @@ CMD_NO_WORKSPACE(import, "import", "", C
 
       E(project.revision_is_in_branch(ident, app.opts.branch),
         origin::user,
-        F("revision %s is not a member of branch %s")
+        F("revision %s is not a member of branch '%s'")
         % ident % app.opts.branch);
     }
   else
@@ -1907,13 +1907,13 @@ CMD_NO_WORKSPACE(import, "import", "", C
                                app.opts.ignore_suspend_certs);
       if (heads.size() > 1)
         {
-          P(F("branch %s has multiple heads:") % app.opts.branch);
+          P(F("branch '%s' has multiple heads:") % app.opts.branch);
           for (set<revision_id>::const_iterator i = heads.begin(); i != heads.end(); ++i)
             P(i18n_format("  %s")
               % describe_revision(app.opts, app.lua, project, *i));
           P(F("choose one with '%s import -r<id>'") % prog_name);
           E(false, origin::user,
-            F("branch %s has multiple heads") % app.opts.branch);
+            F("branch '%s' has multiple heads") % app.opts.branch);
         }
       if (!heads.empty())
         ident = *(heads.begin());
============================================================
--- src/migrate_work.cc	2a8658dad954b975e65d3ceaf5df608007bfe841
+++ src/migrate_work.cc	656cfec56129df09e8f9f5b3f75ea71bfef0c6e8
@@ -76,7 +76,7 @@ get_workspace_format()
       catch (exception & e)
         {
           E(false, origin::system,
-            F("workspace is corrupt: %s is invalid")
+            F("workspace is corrupt: '%s' is invalid")
             % f_path);
         }
       if (format == 1)
@@ -194,7 +194,7 @@ migrate_1_to_2()
     }
   catch (exception & e)
     {
-      E(false, origin::system, F("workspace is corrupt: reading %s: %s")
+      E(false, origin::system, F("workspace is corrupt: reading '%s': %s")
         % rev_path % e.what());
     }
   revision_id base_rid(decode_hexenc_as<revision_id>(remove_ws(base_rev_data()),
@@ -216,7 +216,7 @@ migrate_1_to_2()
       catch (exception & e)
         {
           E(false, origin::system,
-            F("workspace is corrupt: reading %s: %s")
+            F("workspace is corrupt: reading '%s': %s")
             % workcs_path % e.what());
         }
 
============================================================
--- src/cmd_conflicts.cc	0c87cd90c1e90cbb312e51c2aa175acadf07bd20
+++ src/cmd_conflicts.cc	fe887aac3fd84b1048360551dd8c8638d059ea7d
@@ -68,7 +68,7 @@ show_conflicts(database & db, conflicts_
           else
             conflicts.right_roster->get_name(conflict.nid, name);
 
-          P(F("orphaned node %s") % name);
+          P(F("orphaned node '%s'") % name);
 
           switch (show_case)
             {
============================================================
--- src/cmd.cc	d824fb0d9171a2ab5df2a0984ddcd9e789548ec5
+++ src/cmd.cc	8e757d3b01172b53644e669d4fb7f0ec097555d1
@@ -456,7 +456,7 @@ namespace commands {
       app.mtn_automate_allowed = false;
 
       E(ll.ok(), origin::user,
-        F("Call to user command %s (lua command: %s) failed.")
+        F("Call to user command '%s' (lua command: '%s') failed.")
         % primary_name() % f_name);
     }
   };
@@ -467,7 +467,7 @@ LUAEXT(alias_command, )
   const char *old_cmd = luaL_checkstring(LS, -2);
   const char *new_cmd = luaL_checkstring(LS, -1);
   E(old_cmd && new_cmd, origin::user,
-    F("%s called with an invalid parameter") % "alias_command");
+    F("'%s' called with an invalid parameter") % "alias_command");
 
   args_vector args;
   args.push_back(arg_type(old_cmd, origin::user));
@@ -491,7 +491,7 @@ LUAEXT(register_command, )
 
   E(cmd_name && cmd_params && cmd_abstract && cmd_desc && cmd_func,
     origin::user,
-    F("%s called with an invalid parameter") % "register_command");
+    F("'%s' called with an invalid parameter") % "register_command");
 
   // leak this - commands can't be removed anyway
   new commands::cmd_lua(cmd_name, cmd_params, cmd_abstract, cmd_desc,
@@ -535,9 +535,9 @@ CMD_NO_WORKSPACE(version, "version", "",
     print_version();
 }
 
-CMD_HIDDEN(check_globish, "check_globish", "", CMD_REF(debug),
-           "globish string",
-           N_("Check that a particular globish matches a particular string"),
+CMD_HIDDEN(check_glob, "check_glob", "", CMD_REF(debug),
+           "glob string",
+           N_("Check that a particular glob matches a particular string"),
            "",
            options::opts::none)
 {
@@ -545,7 +545,7 @@ CMD_HIDDEN(check_globish, "check_globish
   string s(idx(args,1)());
 
   E(g.matches(s), origin::user,
-    F("Globish <%s> does not match string <%s>") % g % s);
+    F("Glob '%s' does not match string '%s'") % g % s);
 }
 
 CMD_HIDDEN(crash, "crash", "", CMD_REF(debug),
============================================================
--- src/migrate_ancestry.cc	7e659f3dbdf54efc68479591f24ecaef3db3edd8
+++ src/migrate_ancestry.cc	7cee409c3b099ef96551cd53a1d16684108fefdb
@@ -486,7 +486,7 @@ insert_parents_into_roster(roster_t & ch
   if (child_roster.has_node(pth))
     {
       E(is_dir_t(child_roster.get_node(pth)), origin::internal,
-        F("Directory %s for path %s cannot be added, "
+        F("Directory '%s' for path '%s' cannot be added, "
           "as there is a file in the way") % pth % full);
       return;
     }
@@ -507,10 +507,10 @@ insert_into_roster(roster_t & child_rost
     {
       const_node_t n = child_roster.get_node(pth);
       E(is_file_t(n), origin::internal,
-        F("Path %s cannot be added, as there is a directory in the way") % pth);
+        F("Path '%s' cannot be added, as there is a directory in the way") % pth);
       const_file_t f = downcast_to_file_t(n);
       E(f->content == fid, origin::internal,
-        F("Path %s added twice with differing content") % pth);
+        F("Path '%s' added twice with differing content") % pth);
       return;
     }
 
============================================================
--- src/merge_conflict.cc	13b609e15f2f3e2206fdb21d0f5ab5def44c2442
+++ src/merge_conflict.cc	49713287fe6913386578c2956917569165ebdd84
@@ -2282,9 +2282,9 @@ attach_node (lua_hooks & lua,
   I(!target_path.empty());
 
   E(!new_roster.has_node(target_path), origin::user,
-    F("%s already exists") % target_path.as_external());
+    F("'%s' already exists") % target_path.as_external());
   E(new_roster.has_node(target_path.dirname()), origin::user,
-    F("directory %s does not exist or is unknown") % target_path.dirname());
+    F("directory '%s' does not exist or is unknown") % target_path.dirname());
 
   new_roster.attach_node (nid, target_path);
 
@@ -2334,27 +2334,27 @@ roster_merge_result::resolve_orphaned_no
           if (is_dir_t(roster.get_node(conflict.nid)))
             {
               E(downcast_to_dir_t(roster.get_node(conflict.nid))->children.empty(), origin::user,
-                F("can't drop directory %s; it is not empty") % name);
+                F("can't drop directory '%s'; it is not empty") % name);
             }
 
-          P(F("dropping %s") % name);
+          P(F("dropping '%s'") % name);
           roster.drop_detached_node(conflict.nid);
           break;
 
         case resolve_conflicts::rename:
-          P(F("renaming %s to %s") % name % *conflict.resolution.second);
+          P(F("renaming '%s' to '%s'") % name % *conflict.resolution.second);
           attach_node
             (lua, roster, conflict.nid, file_path_internal (conflict.resolution.second->as_internal()));
           break;
 
         case resolve_conflicts::none:
           E(false, origin::user,
-            F("no resolution provided for orphaned_node %s") % name);
+            F("no resolution provided for orphaned_node '%s'") % name);
           break;
 
         default:
           E(false, origin::user,
-            F("invalid resolution for orphaned_node %s") % name);
+            F("invalid resolution for orphaned_node '%s'") % name);
         }
     } // end for
 
@@ -2378,9 +2378,9 @@ resolve_duplicate_name_one_side(lua_hook
         E(other_resolution.first == resolve_conflicts::drop ||
           other_resolution.first == resolve_conflicts::rename,
           origin::user,
-          F("inconsistent left/right resolutions for %s") % name);
+          F("inconsistent left/right resolutions for '%s'") % name);
 
-        P(F("replacing content of %s with %s") % name % resolution.second->as_external());
+        P(F("replacing content of '%s' with '%s'") % name % resolution.second->as_external());
 
         file_id result_fid;
         file_data parent_data, result_data;
@@ -2402,12 +2402,12 @@ resolve_duplicate_name_one_side(lua_hook
       break;
 
     case resolve_conflicts::drop:
-      P(F("dropping %s") % name);
+      P(F("dropping '%s'") % name);
 
       if (is_dir_t(result_roster.get_node(nid)))
         {
           const_dir_t n = downcast_to_dir_t(result_roster.get_node(nid));
-          E(n->children.empty(), origin::user, F("can't drop %s; not empty") % name);
+          E(n->children.empty(), origin::user, F("can't drop '%s'; not empty") % name);
         }
       result_roster.drop_detached_node(nid);
       break;
@@ -2416,26 +2416,26 @@ resolve_duplicate_name_one_side(lua_hook
       E(other_resolution.first == resolve_conflicts::drop ||
         other_resolution.first == resolve_conflicts::rename,
         origin::user,
-        F("inconsistent left/right resolutions for %s") % name);
+        F("inconsistent left/right resolutions for '%s'") % name);
 
-      P(F("keeping %s") % name);
+      P(F("keeping '%s'") % name);
       attach_node (lua, result_roster, nid, name);
       break;
 
     case resolve_conflicts::rename:
-      P(F("renaming %s to %s") % name % *resolution.second);
+      P(F("renaming '%s' to '%s'") % name % *resolution.second);
       attach_node
         (lua, result_roster, nid, file_path_internal (resolution.second->as_internal()));
       break;
 
     case resolve_conflicts::none:
       E(false, origin::user,
-        F("no resolution provided for duplicate_name %s") % name);
+        F("no resolution provided for duplicate_name '%s'") % name);
       break;
 
     case resolve_conflicts::content_internal:
       E(false, origin::user,
-        F("invalid resolution for duplicate_name %s") % name);
+        F("invalid resolution for duplicate_name '%s'") % name);
       break;
 
     default:
@@ -2535,9 +2535,9 @@ roster_merge_result::resolve_file_conten
               E(resolve_conflicts::do_auto_merge(lua, conflict, adaptor, left_roster,
                                                  right_roster, this->roster, merged_id),
                 origin::user,
-                F("merge of %s, %s failed") % left_name % right_name);
+                F("merge of '%s', '%s' failed") % left_name % right_name);
 
-              P(F("merged %s, %s") % left_name % right_name);
+              P(F("merged '%s', '%s'") % left_name % right_name);
 
               file_t result_node = downcast_to_file_t(roster.get_node_for_update(conflict.nid));
               result_node->content = merged_id;
@@ -2546,7 +2546,7 @@ roster_merge_result::resolve_file_conten
 
           case resolve_conflicts::content_user:
             {
-              P(F("replacing content of %s, %s with %s") %
+              P(F("replacing content of '%s', '%s' with '%s'") %
                 left_name % right_name % conflict.resolution.second->as_external());
 
               file_id result_id;
