From: code
Subject: [Monotone-commits-diffs] net.venge.monotone.whitespace-cleanup: 702513409c56dcc2a30cab353f82dceac3906478
Date: Sun, 10 Apr 2011 11:22:36 +0200 (CEST)

revision:            702513409c56dcc2a30cab353f82dceac3906478
date:                2011-04-09T23:09:43
author:              Richard Hopkins <address@hidden>
branch:              net.venge.monotone.whitespace-cleanup
changelog:
propagate from branch 'net.venge.monotone' (head 239d3d013399793455ccc3f42246163591e35631)
            to branch 'net.venge.monotone.whitespace-cleanup' (head c605363c7792a1f0c09c792ec8a0cb49c8ab257d)
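
(A propagate like the one recorded above is normally produced with monotone's
branch-merge command, e.g.

    mtn propagate net.venge.monotone net.venge.monotone.whitespace-cleanup

which merges the head of the source branch into the head of the destination
branch and records the changelog entry shown here.)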

manifest:
format_version "1"

new_manifest [e190ec3450885f97d03ca932af4cf24d6fdd7c89]

old_revision [239d3d013399793455ccc3f42246163591e35631]

patch "src/adler32.hh"
 from [a1669df544fd3ccc233417b87b7bfaa447e63071]
   to [9159cac29f5622d49e192499c8a3dfaf42dd0d37]

patch "src/ancestry.cc"
 from [8b3388b690a5f4878bd29d752c3e6e073411739e]
   to [bef401a1c928d3ddad8ee9277e68da204a7767ad]

patch "src/annotate.cc"
 from [7defae118c92570f6b8adccffd17bd913ecbf041]
   to [20967a20c46eff02daaebfe13ad5acad62365b3e]

patch "src/asciik.cc"
 from [cf946f9a14ad309615704bc960255c50e12b636a]
   to [f5f8ac069640616c8e4ae07054ee8de46f38df52]

patch "src/automate.cc"
 from [efa4ecceab7f1e31f71778f325da312bf6aefba5]
   to [8d490e988b46a2751ab98037226a51b1822e03ae]

patch "src/automate_ostream.hh"
 from [33895c7883416f107f9c0174fb16c16b1b1db81d]
   to [cf84e9fef2749917851238c5a016bf04da518e6f]

patch "src/automate_ostream_demuxed.hh"
 from [cf23c94fe10bf23a627f1bc8b6ecda2c31a179b0]
   to [1db92d43327030688b5aba626333261d82e77ffc]

patch "src/automate_reader.cc"
 from [ac60e48d6902a3cef96bc86a93640936c125beee]
   to [f0c56bbb217e83dc2599f4c1109341ab1940f90b]

patch "src/automate_reader.hh"
 from [284ff60073d0e4861ab516408fa82a8f7a295b04]
   to [8453dbdba80b72a8862c01dee3f0de6209d99f89]

patch "src/automate_stdio_helpers.hh"
 from [b9eaf1c16872c7b18e37edd03b5bfb56b47e690e]
   to [13651121e555f69e325843fe7de6a117322abadd]

patch "src/base.hh"
 from [a8d19ce4d4aab1fcfbc2f868fd09779c3c0e3ffa]
   to [9b269776dd26976070e0eefb6d2b9af3fa0fa546]

patch "src/basic_io.cc"
 from [95ae5bc17a2c7140bfe13691036591b8975bcf41]
   to [71218bcaf90a2006d525e6415e630af029d6fcb8]

patch "src/basic_io.hh"
 from [64cbd940961328cf9c6a868e015424e35efad4cf]
   to [39572b45cd5f6028b163236683399d422102e2e3]

patch "src/cache_logger.cc"
 from [e5e7cc4fb068945e7a7a8cf0450dec6d31f160e3]
   to [98a87cb16e7d67366c3e9696abe2dacf94358af8]

patch "src/cert.cc"
 from [8d074780068206b16f302c9e5b0ab6da609f3448]
   to [b3236ff082a766473a15a631b7f4802a6e1f8df7]

patch "src/char_classifiers.hh"
 from [380ead182576526e2343bd873b19114770015979]
   to [0cead355dc925eed5f29289e1823562254206c09]

patch "src/charset.cc"
 from [bb433fcc004c011696478d5c9d5626b2c72c78e7]
   to [89295ac94fc9e2e8e8b3e8ac434da88e0ca86573]

patch "src/cleanup.hh"
 from [34bc7dde900a7f6d91534821886f91d1f9409409]
   to [0afd087eff03aec913a00377e1659fc42de10dad]

patch "src/cmd.cc"
 from [47891700f02feb1a6278c175fc56f5253c91ea49]
   to [43998ec9a6a25f9535dbecf3c777a0462829aa44]

patch "src/cmd.hh"
 from [968a5cbc4b9dc33badca1e93a6f387eaa1c70c9e]
   to [2860f8c48f7e7ba850201c496a1bd33c3dbe7975]

patch "src/cmd_automate.cc"
 from [9a39706690d6edbe1233e0cb613bc3ab8d0b3fe3]
   to [dff0158a02050a257e2c934debaa6c2478827eb8]

patch "src/cmd_conflicts.cc"
 from [ad9dc74c27d4d8c50a74e5825ec9e0a12cdc1b09]
   to [41cf02819e27393713a9bae4fbd57f6776cae3d5]

patch "src/cmd_db.cc"
 from [6bc1764d38063b0f5a23dd490f02786daafad0b7]
   to [bfab83a5c295268ab4836ec9a2b57cae28a59307]

patch "src/cmd_diff_log.cc"
 from [b24dffb0470d057fa1e91ccc5a81627b40252e78]
   to [0267ac98cd2578c6c3850a324b46661fb44468a3]

patch "src/cmd_files.cc"
 from [f15378efd25e597762776ad6e9c6f78c1a4ff191]
   to [70fbf79b0e8c66d1d20c16275423fca4cfbd3a07]

patch "src/cmd_key_cert.cc"
 from [a48989eb8c9721a45469b0b1f44b36b1771a2eeb]
   to [2e654c11156881fc618833ee7c394917dbf014e2]

patch "src/cmd_list.cc"
 from [5eee1fff3753df893cfc1325d362f6bc8445dd11]
   to [a7e0dc06b350912b9af7fbb8d7bc7255646b0503]

patch "src/cmd_merging.cc"
 from [84b177469ecb60664be23c42c38cc649d23899a7]
   to [f5fdbc18e48e9345a8a00dd544cd7249e0089cfc]

patch "src/cmd_netsync.cc"
 from [88ca982ea191c22c88b490e87695b359754aff45]
   to [e163b9d3dc58d4efec67576c1f10c22ff43a66e6]

patch "src/cmd_othervcs.cc"
 from [1b4be44b0e712375f9883e710a833dfac840c9d8]
   to [6f1e87b6e3a4299dcbd516048255af97ffd2b7b1]

patch "src/cmd_packet.cc"
 from [12adecc7e55564b04e6a1a0541600d4fd170e8a8]
   to [ca92628d9c4bc16e61fe562ab06234ba34bbc66e]

patch "src/cmd_ws_commit.cc"
 from [d9dbacb820c6d070c4952ee2b0f143e61e85631e]
   to [6be2215b9897c62bc50655979079b35cd0dbf91b]

patch "src/commands.cc"
 from [d63ab460bfd07e887646fb4e5ead6a5ab4c2d0c8]
   to [d90078c4696fc5ca4acc805e3258e2395466f518]

patch "src/commands.hh"
 from [23533dfe704f8fae0492404a0188363fea27d9c1]
   to [427e72f771ee3986eae0bd76558dd41e94964fe9]

patch "src/constants.cc"
 from [b0f021f8b9a3f968cf942929122b02f09d4fda6f]
   to [e06b82f6f7fd5a37367893418df17efc65b18ebc]

patch "src/cow_trie.hh"
 from [4531a829958b3a0f92abd30d4ef9bd35ef5e07a3]
   to [a92482fc7f6caddb8c59e0e139d577d55fa7ede4]

patch "src/cset.cc"
 from [cc01fceb3ba2c16bb6079ab5f7a93f0d84f8ca85]
   to [66a5d84e01a9327ea925530be133bb26745ac871]

patch "src/cset.hh"
 from [90ea3f1a13761e0b3bd3ec3c894f7b91feb4a4c6]
   to [fc616593ea10593fff39cd9c2cd88627d24b919b]

patch "src/current_exception.hh"
 from [4cdea3209b8fbd10ba6ea4b71db8930dda3b4d08]
   to [f2849b15fe31d4e665b15f72c3a5922a382e478e]

patch "src/cycle_detector.hh"
 from [f77b04f6c75c70dbb8e99265aa3d36d0f2f244af]
   to [04d5b7dfbeec25955f4abc4c0c815417529b11db]

patch "src/database.cc"
 from [4c259f963c440fc95564dfec99b2f832f0bad643]
   to [fd145c90f06d2e0eb22409c786d76d53b6a4161a]

patch "src/database.hh"
 from [66798497f77bc1010b7e5850d2c406eaf1010b2e]
   to [38b1b0f245c71d20e896fa05660203b0060c0fc3]

patch "src/database_check.cc"
 from [b926df22bc2ec6134600d55072462c9a3df1b8b3]
   to [5dbb5cc92981d63f9c7dfdf2d43a3d5e71e042b1]

patch "src/date_format.hh"
 from [2100f7a1854afa9573d3991e41bbdc36f75c7e5b]
   to [0d066f33ef46ae7aebb2fd3f603d77e8578f3f50]

patch "src/dates.cc"
 from [02648f8882988ad5122eccc39528249b6333b219]
   to [6027cc482fe80ee75f83f00f26e916b87414c405]

patch "src/dates.hh"
 from [3a730e5536cece2437e9caad0e4f8f2767e4887d]
   to [009891adf5e29e47deb4c8021a05ca1391f279c2]

patch "src/diff_output.cc"
 from [8df7c0bec2e64275f6c8e8b6ccb035c8fd0f684f]
   to [9d30ea1f245cb9850a43fe16c841054e5e890605]

patch "src/enumerator.cc"
 from [9e64e9a81b061b38abfcdf0b70e2047e1eb9436f]
   to [0fb69cf10949735f6409400127c25b819d0c9e8f]

patch "src/enumerator.hh"
 from [5e52b8fae270dcabc4ada249327adcd496521a73]
   to [ab18cb92e9046d2bd258f8f5ee07f353779f9dca]

patch "src/file_io.cc"
 from [76874e5ca4978bd0fd3ba183cd19edc3447ece0d]
   to [7536c178477a2afff169c60634392f832948b269]

patch "src/file_io.hh"
 from [1ecc0d6d6f9dddeb676ac95ca074c2dfb82e58d0]
   to [ee0eb7ff39d28ff626a9095fa5e88a064035dcba]

patch "src/git_change.hh"
 from [f2447c0cbf0311d85622f0aada664bce13ad1f10]
   to [a135101e3bed12f359ad648392a7cc74da2bf7d2]

patch "src/git_export.cc"
 from [05a325fef7bd56f529e55db02fd2542acd5d06f7]
   to [fc30050129f802453cbcb42f128bccec194862cb]

patch "src/git_export.hh"
 from [6888ff1cc0d73bf0b30ba975d6c4050bb345d9de]
   to [0992d00e68c6e6bce6b4ee5301ba029a220bd5c2]

patch "src/globish.cc"
 from [05742d8536befa06b6edfb9a85308e45005375c4]
   to [34b3abdc8e3423e6ca1b9a275ef8a1f14d045b42]

patch "src/graph.cc"
 from [bdda595f7f9c3b6f1513389550f316bf3ebe688d]
   to [73f589e3aeded5a90b01e9bcf4012bc311f66110]

patch "src/graph.hh"
 from [ab4c40bcdcde42949481cbc0e3c29ba66ab07a9c]
   to [2b65afd542c728777ac75d0de33ccae977be96d6]

patch "src/gzip.cc"
 from [e7c19bee910e3d421fd073933810ec52092b1d9b]
   to [8cab5f5f7d09c2a9a31c350523de06cc262ae163]

patch "src/gzip.hh"
 from [649dfc0b250954f39c0f73870f8bec3f32f7fa43]
   to [4a6632de2513393bf015f131abb768723708930a]

patch "src/hash_map.hh"
 from [10db5cb434769d726d1f354c7ac19c8d102152b9]
   to [7304e09d7b6002f07b01285b996abd2cbf6f1f22]

patch "src/inodeprint.cc"
 from [aa6821e0691244214d92f7a911daf130daf52f6d]
   to [21cdfd976d545b141021daf701d3caa723ed306b]

patch "src/inodeprint.hh"
 from [7def309187f69a62288c27063692f9f85d4cb848]
   to [31afe053768a80cdaf82453f4147ea5cdb762924]

patch "src/interner.hh"
 from [c300647e6937a8a68f33fbbcd647e6fd9455a87f]
   to [9ff590310d8c5fae9da5acd1b510788c223a9da7]

patch "src/key_packet.cc"
 from [d1306df89dd684badac02c03744cd446381c07d3]
   to [50ba70750429646d0928838a45482198db962cf2]

patch "src/key_store.cc"
 from [64c4c4bed1fadc26b51207b9f61343f9dd7d3c6e]
   to [9b092fc3342d8d9e8a31842712119bd0dabddf4e]

patch "src/key_store.hh"
 from [82ae903386eb882a602f5938ef5e5d9284187600]
   to [c0b0f30cfbbbb484dbbc37fdd1480ed2c50fa829]

patch "src/keys.cc"
 from [5e83c66959453e7454ea99b5a403c41ab4982461]
   to [c5ed270951bef96b89e720da1d1c6ee859fcf93f]

patch "src/lcs.cc"
 from [a7b9e3b57dea4f4648ca2dd3de5cd301ec36802e]
   to [8916a27e95b7b0e935c27ffbb25f7a016b8e1a26]

patch "src/legacy.cc"
 from [c936ff983cd8724de334694888504bd82e0c5902]
   to [be33ebf3219e52634608496f20ebdda1cfa9df85]

patch "src/legacy.hh"
 from [6758cc428dda77af648e712d572e0b6c764146b4]
   to [3289ce367079a8a3ffb96a9673d8f531c693d97d]

patch "src/lexical_cast.hh"
 from [fe70361a7da25ed93b4b46b266220fcfa29bf5c3]
   to [6d5d88588259fedae57308d38fa84bfdc384ebb4]

patch "src/lru_writeback_cache.hh"
 from [cfa4c94526bd19a2084d2f1086f1fe85364d0062]
   to [13d1849bab48c3a4c564d604bf4caa601e311efd]

patch "src/lua.cc"
 from [40d590a9716e281f2004c1b5eb19c2b0926e7dde]
   to [be3fdb610d743d62a3cc80d47b9332f86cbfe430]

patch "src/lua.hh"
 from [146872f14cc23c6179c37bcda8667a0ecd178826]
   to [9e644f9cd3750b68f9ea7d954428fd3ba7ac6b74]

patch "src/lua_hooks.cc"
 from [1c178085332c73dcefb4681d205d17b059e52080]
   to [9649d1ff6898e68754148ec117c9528a11475a21]

patch "src/lua_hooks.hh"
 from [66412b9fa5db97cd3b3ec01cadf036fb346ac161]
   to [dab3af10dfb9901cdfefe51e4ffc5164ae04acfa]

patch "src/luaext_globish.cc"
 from [470ed3d2b5443f902335346ae5d4afc4f0ef8ffa]
   to [d13bd3416b058e26057aa94a0c01ade0b83d5e6f]

patch "src/luaext_guess_binary.cc"
 from [2bc668e174bf62b58c47a9bbba758362d12639d9]
   to [e6c9a70da996bbe13be349b64e87e2dd94007724]

patch "src/luaext_mkstemp.cc"
 from [5170df290e82446908b2aee9704d1e905df0d03c]
   to [9b9dc591dd05d05bce1654ce1d51da048598d80e]

patch "src/luaext_parse_basic_io.cc"
 from [995b37d3cc3eb58a3c6be6db06e38f997ba6845e]
   to [088b7865260014a7d079b071ebad66bc769a4027]

patch "src/luaext_platform.cc"
 from [288a571d7c91fbb4abffb66611b52aac74889aa2]
   to [2c1c19ecf2b2745eecba2063cf23a83e09b672da]

patch "src/maybe_workspace_updater.cc"
 from [f1d41132d47052f6b9f54980de4feb21348e715a]
   to [c1ee022eb8ee6e62581d88448950f245f8fedcf6]

patch "src/merge_3way.cc"
 from [f415f5ddbcfafbc8c822b2e3a848f48f968287af]
   to [1ff26ba435cf766c0f450ae3be61df5f3815aba9]

patch "src/merge_conflict.cc"
 from [1c09dc4a20532ce040c429d7fb5ed2a25a199856]
   to [ea11613afa8d7315cc63e82d5c30ed748f8586a1]

patch "src/merge_content.cc"
 from [76d5a0997d9217b309d75806f8059d8f56f2ca49]
   to [f17fa659968e200e935c7fc580b2d50291cff91a]

patch "src/merge_content.hh"
 from [c74c14bb8031b9bf67f684d9ddfdf6624f424f01]
   to [4860686e0c6805239f17ad0a69e52aaf794aca40]

patch "src/merge_roster.cc"
 from [98297d6264f77d540fc8e1578b1ebc5b2f36ec38]
   to [50bbb499093aad43247d7a8cb5ca0b4202ce4ed7]

patch "src/merge_roster.hh"
 from [cd2da3b06f595187a27622a98580b0636aabea4b]
   to [b097fbd5bdf41f60737ffff0288034696f7d50c7]

patch "src/merkle_tree.cc"
 from [681a6488e15edc8495664d188928fe122caa8bb0]
   to [1a6222f82bb1812d451479e12a8fa552aaef1550]

patch "src/merkle_tree.hh"
 from [7ae6a892d28c901e06986c03c90b54a9acf5102d]
   to [2ecb86860e435eaca18ca83de9b061cd05b86626]

patch "src/migrate_ancestry.cc"
 from [e193114e2881198a24c5d7ed9a7ad8742aa0f191]
   to [007f5268fed64add67cd6943a9d970676f370bc6]

patch "src/migrate_schema.cc"
 from [6795de38c20c625b5f5d955cc4beb26b243dcf57]
   to [8f1fa51a198ce29bf1da3043c7f523b8c12197b0]

patch "src/migration.hh"
 from [df4687ee1932044712b9b44386dcf3c7ff3c4f22]
   to [3b9ed0000610e84ed5f33590c0cf4dfb56d358f7]

patch "src/mkstemp.cc"
 from [e4bc406d8843b959ece7978ea83fb29889157646]
   to [035e5522db72d472b4ad9db562f861349b626e7f]

patch "src/monotone.cc"
 from [a25ecdd170a601db798b63a60add7681609a34c4]
   to [26270104ceb434f9f4606645542f9777693067d3]

patch "src/mt_version.cc"
 from [e73c0a25e76ba110976a4a0b94f39f5295c1dd75]
   to [f0098d0c1b96e6719c2c52e0991fbd1d0263e6b8]

patch "src/mtn-sanity.cc"
 from [d6b11b878814b0a40a4dd0ae4e0f666566a1d8aa]
   to [1636e5bce6c325a2cf9103dbbb36d538a5b8aeb6]

patch "src/mtn-sanity.hh"
 from [4703095d226cf4f77a1d5ac4c1142963f193bcad]
   to [5d51215a5bdea565cf83c2c759d2db503c581a76]

patch "src/netcmd.cc"
 from [7adb348671629350233209183dab3332410f91d0]
   to [d49554a3bf89dee69513892d64633132445b4572]

patch "src/netcmd.hh"
 from [6ac2e5bcf92deae29cbad403b4a0397f0a882734]
   to [7979d0ab7dfd3877465cc36ae1848a139eba5630]

patch "src/netio.hh"
 from [05953ebcfb8b75c0357ef7866e122d4f68c3d90d]
   to [df8eaeb31e5fb3368b99e55dd3500738e437ea7d]

patch "src/netsync.cc"
 from [55f9aef4302b12a06baef3ae0088fb07431e226f]
   to [2fda9e3fb1c251ef804755f51c63e407e011ee40]

patch "src/netxx_pipe.cc"
 from [ca933b395822bb54de954e82fce6e1f012037cf9]
   to [a823fe1d4f1171e07d596ae58852a42907a89d39]

patch "src/netxx_pipe.hh"
 from [f0b5ed2e06bcfd5dce87d3a5b59d0ec145c3228c]
   to [aada41549bd6b3a922c6ea62fbf1fc1eec655fd1]

patch "src/numeric_vocab.hh"
 from [857d25e240304dd485efc94ada24a1774eaf78d2]
   to [e9ed3d772acc4820f0ef6a88d20f060c93ee51b6]

patch "src/option.cc"
 from [7d0c3b6ca2eccfcdb1907e5498abdd74507a3612]
   to [407ac49827148579e4fbb74cb47cb20fa4b07523]

patch "src/option.hh"
 from [853a798c3150eef1824a73daca05d1e1cf1eb3f8]
   to [b671c64d0baad05da771208066080da34bb860e1]

patch "src/option_reset_info.hh"
 from [a11b67e3d5c52197a2228224b8927b2d27471b3a]
   to [b699f762793a213437bd8619ac11a8d22b32b54f]

patch "src/options.cc"
 from [aa1b83a97c8af2aaedfa061447d8a12a2be04e01]
   to [8938e79d5a5996bc1ec7fa353456cd79cde1deee]

patch "src/options.hh"
 from [a1ab6f9b8cd25fcb3338cc78b31d87d9e6df2026]
   to [68367508db1af92409cbd823946cb501d60111ee]

patch "src/options_applicator.cc"
 from [5407017eb222eb329f9e22fc465d4f36743d37ee]
   to [315da3c09be39431aa9bb19e69c0e28c993e5b59]

patch "src/options_list.hh"
 from [0462e302b89179f4acb28ecb91f4255140d4a4a7]
   to [bb5041345a606db989cfb73fa66144da6a8ceb33]

patch "src/origin_type.hh"
 from [0e329b2b10d081f52a8346044a6522a9fdc68552]
   to [60e015b70df9ab47c92125245b2d57f26a7560d1]

patch "src/packet.cc"
 from [f61360ed2524fdf53411bd24d022a2a3c9e6e9c7]
   to [a49c13b27b42b100628d5badf57302a53eb3cc4e]

patch "src/parallel_iter.hh"
 from [4901d0d1b3daece3f0b292c4343de8634814522e]
   to [5d108bc8dfcbf26711e06c39fa4e09a9a83a4547]

patch "src/paths.cc"
 from [f4244d2b15de4c1b8005fa8b244fe8391ce61d1b]
   to [971f5fff7b22a3b8574036742e66fdad28295f1e]

patch "src/paths.hh"
 from [2939f11db21e23d68530ed7415b9bdfa8dfd5a20]
   to [e0017f5b8e0ee0185f58ec2a41e3770413a2de51]

patch "src/pcrewrap.cc"
 from [7f5bd4e7d8d9e02f2b0271fa9a0f64dcbce2467d]
   to [18585099e76610c71edbf2f012546c1556b0edd0]

patch "src/pcrewrap.hh"
 from [99182ce16ab33a2188f00c5f355e8e1e9d0d52e5]
   to [0e242e97778053474920e79475906679e2cadb3c]

patch "src/platform-wrapped.hh"
 from [56fdcf095ffef702042d9483aaeb62bc832369a1]
   to [a13edab3d54b35ab15b967b6e9903af0d850a6fb]

patch "src/platform.hh"
 from [4b86bdcb86f3976266a0292855ef4da2ca313271]
   to [8e0d9691e67cfe829b5ccc49daaabff22640ffaa]

patch "src/project.cc"
 from [f3ba624e5361749e52f409da2fb6d6cb3455647d]
   to [c97e54e434e9fda218a5d35a338899d8cbf7292a]

patch "src/project.hh"
 from [a156012f665b7608020320598c08541ee340cd3c]
   to [18ee94d486bf013f7342320201f9790a8cd26f14]

patch "src/rcs_file.cc"
 from [885b3fbe7b6cfed78816f0e57cd71d44616213c6]
   to [db458f88c1523182b66edb6491361404a254abb6]

patch "src/rcs_file.hh"
 from [23731152c2a5ac9eb8b6573eec50cd00a8522f54]
   to [cce84c91f063e91f836d310a7a9f6fd8e2534405]

patch "src/rcs_import.cc"
 from [c2d6d57e4896d8c3e53821e7619933029c83f7e7]
   to [9b8acf7000b274e7de7aab2cdc8e999bf31b53e6]

patch "src/refiner.cc"
 from [2de8131df2595d5bcff5efe133ca1f768006d38f]
   to [a589006f39a5cc197ccbf4ccd3bda36c06e9e99d]

patch "src/refiner.hh"
 from [543f5789aa9f74593f909e4b80cea986639228b4]
   to [0e206de6031f9af075210d259bbe5b64cca11efa]

patch "src/restrictions.cc"
 from [211bf4562ebbe67950b900a45a28bf0c91395439]
   to [dbcf9e72fc438f1ac922d2d8b3917610fa4e6e53]

patch "src/restrictions.hh"
 from [6cc851724aa78f04c22327a1a82dfe4facbca43a]
   to [5a4b84eb11595378823d7e654c4d235294c8758e]

patch "src/rev_output.cc"
 from [a2c70b893b31296917d1a2b974faa1da46c13f1e]
   to [8dd2d5d36450fae54e392ff5b3aea83d7deff36c]

patch "src/rev_output.hh"
 from [666dd3ed35e16d8b122b4932c2aad05a21a22e25]
   to [9f9e97b2e2168f99b6af7c7f941f02de9a621a29]

patch "src/revision.hh"
 from [740c4dd4ee350fcf06af3ba707cef3dadecb46f8]
   to [44d34f9c51f853422649de195ba7e592df4382fe]

patch "src/roster.cc"
 from [b4cec49faa1928388c7ab0ae1e2f389b202270b0]
   to [3f5e134f840ac157909419f156b9e24ebbb5d10a]

patch "src/roster.hh"
 from [2b498673424f4d69f74cdb1486b9f8b051766e34]
   to [a57935b6e7ff282dde8f4e8adc3651979153c5b2]

patch "src/roster_delta.cc"
 from [a8bba8fe7ce5ac1b18b996a6501727065304a526]
   to [6dc49f6012825875f1466f245c018869faee9052]

patch "src/sanity.cc"
 from [17e9e70774f064cfb8ef4e25075a1f493b63cd78]
   to [e9f64040e8c42e6d3a88966ac2825e50aaedd910]

patch "src/sanity.hh"
 from [1f70fd2034c34f0c83a683a59362fb8d1dbac625]
   to [f6540f47e37f207dab7225918e638a6b10094a1e]

patch "src/selectors.cc"
 from [3efba0ce3139290b4088cea0d6e82a912565471e]
   to [daf19249fc9a9723bb4a07f2d418a86b3d9def9f]

patch "src/sha1.cc"
 from [5e1aa972d7c7d66e06320b039989652b830dcd75]
   to [a3946859601e0531b7f4b74c9a262df794a87fc4]

patch "src/simplestring_xform.cc"
 from [e779da92dc0c8e2f09d5800d26073d4d2ca5d130]
   to [bcf82fe5bf8eb104f827c247f5f4e84a4d66d5a1]

patch "src/simplestring_xform.hh"
 from [ab0d125445f13982344bbe3f713e2ce90d11cb66]
   to [82ae3326bc09ab0f08ba6a424f1952cd9e7f1b53]

patch "src/specialized_lexical_cast.cc"
 from [21b197949d6fea0426a7e22a61baf4c2fceff758]
   to [316e542247b7872612fa32ad692d37b9783e569d]

patch "src/ssh_agent.cc"
 from [6d188e7012a0b82f782563b09bbd7adf3f70cf75]
   to [6db021ac057c655aaae66371b4af8c2e5f28dc66]

patch "src/string_queue.hh"
 from [5053a58d5e02d505c19a02b2101dc5876d5bfbaa]
   to [14f6e3629d400cd90d96bf435563bb3cd62f51d3]

patch "src/transforms.cc"
 from [cdfb6854ef4992faba21074145f3c8269b845b11]
   to [4702233125ffc5c75646d22cab4cda8298d3c3d5]

patch "src/transforms.hh"
 from [301029c9284881c127a16fc610a8c22eb670cdde]
   to [4d6ebfd400a6e83d882f3281fb0f1a70e92f5b61]

patch "src/ui.cc"
 from [8bdaebf0da3e90cbe0026d140c2000e92998693f]
   to [82328f23df1272f1b6e9e63141d73b4807bf6b2d]

patch "src/ui.hh"
 from [405d3582ac4c576a9ad0a6f8885eb1ac12be2576]
   to [380cc7b8d1c4e8c571b6d952497700acf383d35c]

patch "src/uri.cc"
 from [f50e7599f6294a9872c055a8b230ea4933bdcc82]
   to [ceac570deba6cb715b8a8f6ffea5ec92ce159fb5]

patch "src/vocab.cc"
 from [4c73af08de6dd30f710896835673361ee019466e]
   to [be19f581df388e5cac09c0d0cb97714ce43b34f4]

patch "src/vocab_cast.hh"
 from [9710931398c21cebf7b008e4982e5cfc2f2b6614]
   to [8050d9584166063f05430c115a1bff0cdaa0147d]

patch "src/work.cc"
 from [36e5dcda8cf09c9054cb88e6165707112ba9ac03]
   to [8815821392dd36a8fb3e71583615b278e4ca80b2]

patch "src/xdelta.cc"
 from [5094e62fb1c7881a0b8da6331f2fc7488a342146]
   to [1cf88b0bc098831ed583d2dc60fc4288a08b96b0]

old_revision [c605363c7792a1f0c09c792ec8a0cb49c8ab257d]

patch "test/func/db_opt_fallback_mechanisms/__driver__.lua"
 from [09354a970b921effa2850aa4ad722ce95a433ea8]
   to [0b0da944f19f7a0be42165069fdc1bb8862fd6ee]

patch "test/func/serve-automate/__driver__.lua"
 from [d41f819fd49060c45bb154dde614699b0cb938e7]
   to [ae4b03baf16d8401f31a32c8377021f879788d01]
============================================================
--- src/adler32.hh	a1669df544fd3ccc233417b87b7bfaa447e63071
+++ src/adler32.hh	9159cac29f5622d49e192499c8a3dfaf42dd0d37
@@ -18,7 +18,7 @@ struct
 #include "sanity.hh"
 
 struct
-adler32
+  adler32
 {
   u32 s1, s2, len;
   static const u32 mask = 0xffff;
@@ -30,7 +30,7 @@ adler32
 
   inline void in(u8 c)
   {
-    s1 += widen<u32,u8>(c);
+    s1 += widen<u32, u8>(c);
     s1 &= mask;
     s2 += s1;
     s2 &= mask;
@@ -39,9 +39,9 @@ adler32
 
   inline void out(u8 c)
   {
-    s1 -= widen<u32,u8>(c);
+    s1 -= widen<u32, u8>(c);
     s1 &= mask;
-    s2 -= (len * widen<u32,u8>(c)) + 1;
+    s2 -= (len * widen<u32, u8>(c)) + 1;
     s2 &= mask;
     --len;
   }
@@ -59,7 +59,7 @@ adler32
     // and (for s2) (maxs1 = 255*255)*255 < 0xffff_ffff
     while (count--)
       {
-        u32 c = widen<u32,u8>(*(ch++));
+        u32 c = widen<u32, u8>(*(ch++));
         s1 += c;
         s2 += s1;
       }
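
The adler32 struct being re-indented above is the rolling checksum used by
monotone's xdelta code for weak block hashing.  As a reading aid, here is a
small self-contained sketch of the same update rules (illustrative names,
not monotone's actual API): dropping the oldest byte with out() and pushing
a new byte with in() gives the same sums as recomputing over the shifted
window.

// Stand-alone illustration of the rolling Adler-32 update rules shown above.
// Names and types here are illustrative; the real class lives in src/adler32.hh.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>

struct rolling_adler32
{
  uint32_t s1 = 1, s2 = 0, len = 0;
  static constexpr uint32_t mask = 0xffff;   // both sums are kept mod 2^16

  void in(uint8_t c)                 // append a byte to the window
  {
    s1 = (s1 + c) & mask;
    s2 = (s2 + s1) & mask;
    ++len;
  }

  void out(uint8_t c)                // drop the oldest byte of the window
  {
    s1 = (s1 - c) & mask;
    s2 = (s2 - (len * c + 1)) & mask;
    --len;
  }

  uint32_t sum() const { return (s2 << 16) | s1; }
};

int main()
{
  std::string data = "the quick brown fox jumps over the lazy dog";
  const std::size_t window = 8;

  // Seed the rolling checksum with the first window of bytes.
  rolling_adler32 roll;
  for (std::size_t i = 0; i < window; ++i)
    roll.in(static_cast<uint8_t>(data[i]));

  // Slide one byte at a time; the rolled sums must match a fresh computation.
  for (std::size_t start = 1; start + window <= data.size(); ++start)
    {
      roll.out(static_cast<uint8_t>(data[start - 1]));
      roll.in(static_cast<uint8_t>(data[start + window - 1]));

      rolling_adler32 fresh;
      for (std::size_t i = start; i < start + window; ++i)
        fresh.in(static_cast<uint8_t>(data[i]));

      assert(roll.sum() == fresh.sum());
    }
  return 0;
}
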
============================================================
--- src/cert.cc	8d074780068206b16f302c9e5b0ab6da609f3448
+++ src/cert.cc	b3236ff082a766473a15a631b7f4802a6e1f8df7
@@ -21,13 +21,13 @@ cert::operator<(cert const & other) cons
 cert::operator<(cert const & other) const
 {
   return (ident < other.ident)
-    || ((ident == other.ident) && name < other.name)
-    || (((ident == other.ident) && name == other.name)
-        && value < other.value)
-    || ((((ident == other.ident) && name == other.name)
-         && value == other.value) && key < other.key)
-    || (((((ident == other.ident) && name == other.name)
-          && value == other.value) && key == other.key) && sig < other.sig);
+         || ((ident == other.ident) && name < other.name)
+         || (((ident == other.ident) && name == other.name)
+             && value < other.value)
+         || ((((ident == other.ident) && name == other.name)
+              && value == other.value) && key < other.key)
+         || (((((ident == other.ident) && name == other.name)
+               && value == other.value) && key == other.key) && sig < other.sig);
 }
 
 bool
@@ -54,7 +54,7 @@ read_cert(database & db, string const & 
                                  "cert hash"),
                origin::network);
   revision_id ident = revision_id(extract_substring(in, pos,
-                                  constants::merkle_hash_length_in_bytes,
+                                                    constants::merkle_hash_length_in_bytes,
                                                     "cert ident"),
                                   origin::network);
   string name, val, key, sig;
@@ -76,44 +76,44 @@ read_cert(database & db, string const & 
   switch(ver)
     {
     case read_cert_v6:
-      {
-        keyname = key_name(key, origin::network);
-        bool found = false;
-        std::vector<key_id> all_keys;
-        db.get_key_ids(all_keys);
-        for (std::vector<key_id>::const_iterator i = all_keys.begin();
-             i != all_keys.end(); ++i)
-          {
-            key_name i_keyname;
-            rsa_pub_key pub;
-            db.get_pubkey(*i, i_keyname, pub);
-            if (i_keyname() == key)
-              {
-                if(db.check_signature(*i, signable, tmp.sig) == cert_ok)
-                  {
-                    tmp.key = *i;
-                    found = true;
-                    break;
-                  }
-              }
-          }
-        if (!found)
-          {
-            return false;
-          }
-      }
-      break;
+    {
+      keyname = key_name(key, origin::network);
+      bool found = false;
+      std::vector<key_id> all_keys;
+      db.get_key_ids(all_keys);
+      for (std::vector<key_id>::const_iterator i = all_keys.begin();
+           i != all_keys.end(); ++i)
+        {
+          key_name i_keyname;
+          rsa_pub_key pub;
+          db.get_pubkey(*i, i_keyname, pub);
+          if (i_keyname() == key)
+            {
+              if(db.check_signature(*i, signable, tmp.sig) == cert_ok)
+                {
+                  tmp.key = *i;
+                  found = true;
+                  break;
+                }
+            }
+        }
+      if (!found)
+        {
+          return false;
+        }
+    }
+    break;
     case read_cert_current:
-      {
-        rsa_pub_key pub;
-        tmp.key = key_id(key, origin::network);
-        db.get_pubkey(tmp.key, keyname, pub);
-        if (db.check_signature(tmp.key, signable, tmp.sig) != cert_ok)
-          {
-            return false;
-          }
-      }
-      break;
+    {
+      rsa_pub_key pub;
+      tmp.key = key_id(key, origin::network);
+      db.get_pubkey(tmp.key, keyname, pub);
+      if (db.check_signature(tmp.key, signable, tmp.sig) != cert_ok)
+        {
+          return false;
+        }
+    }
+    break;
     default:
       I(false);
     }
============================================================
--- src/cleanup.hh	34bc7dde900a7f6d91534821886f91d1f9409409
+++ src/cleanup.hh	0afd087eff03aec913a00377e1659fc42de10dad
@@ -14,7 +14,8 @@ template <typename T, typename R>
 // from our helper C libraries are deallocated when we leave a scope.
 
 template <typename T, typename R>
-struct cleanup_ptr {
+struct cleanup_ptr
+{
   T ptr;
   R (* cleanup)(T);
   explicit cleanup_ptr(T p, R (*c)(T)) : ptr(p), cleanup(c) {}
============================================================
--- src/commands.cc	d63ab460bfd07e887646fb4e5ead6a5ab4c2d0c8
+++ src/commands.cc	d90078c4696fc5ca4acc805e3258e2395466f518
@@ -121,7 +121,7 @@ namespace commands
   command::allow_completion() const
   {
     return m_allow_completion &&
-      (m_parent?m_parent->allow_completion():true);
+           (m_parent ? m_parent->allow_completion() : true);
   }
 
   command_id
@@ -152,7 +152,7 @@ namespace commands
   }
 
   void
-  command::add_alias(const utf8 &new_name)
+  command::add_alias(const utf8 & new_name)
   {
     m_names.insert(new_name);
   }
@@ -192,7 +192,7 @@ namespace commands
   command::desc() const
   {
     if (m_desc().empty())
-        return abstract() + ".";
+      return abstract() + ".";
     return abstract() + ".\n" + safe_gettext(m_desc().c_str());
   }
 
@@ -202,7 +202,7 @@ namespace commands
     names_set set;
     init_children();
     for (children_set::const_iterator i = m_children.begin();
-      i != m_children.end(); i++)
+         i != m_children.end(); i++)
       {
         if ((*i)->hidden() && !hidden)
           continue;
@@ -321,7 +321,7 @@ namespace commands
   map< command_id, command * >
   command::find_completions(utf8 const & prefix, command_id const & completed,
                             bool completion_ok)
-    const
+  const
   {
     map< command_id, command * > matches;
 
@@ -354,8 +354,8 @@ namespace commands
             // while we list hidden commands with a special option,
             // we never want to give them as possible completions
             if (!child->hidden() &&
-                     prefix().length() < (*iter2)().length() &&
-                     allow_completion() && completion_ok)
+                prefix().length() < (*iter2)().length() &&
+                allow_completion() && completion_ok)
               {
                 string temp((*iter2)(), 0, prefix().length());
                 utf8 p(temp, origin::internal);
@@ -382,9 +382,9 @@ namespace commands
     command_id remaining(id.begin() + 1, id.end());
 
     map< command_id, command * >
-      m2 = find_completions(component,
-                            completed,
-                            allow_completion() && completion_ok);
+    m2 = find_completions(component,
+                          completed,
+                          allow_completion() && completion_ok);
     for (map< command_id, command * >::const_iterator iter = m2.begin();
          iter != m2.end(); iter++)
       {
@@ -504,7 +504,7 @@ namespace commands
         I(matches.size() > 1);
         string err =
           (F("'%s' is ambiguous; possible completions are:") %
-             join_words(id)()).str();
+           join_words(id)()).str();
         for (set< command_id >::const_iterator iter = matches.begin();
              iter != matches.end(); iter++)
           err += '\n' + join_words(*iter)();
============================================================
--- src/commands.hh	23533dfe704f8fae0492404a0188363fea27d9c1
+++ src/commands.hh	427e72f771ee3986eae0bd76558dd41e94964fe9
@@ -19,7 +19,8 @@ class utf8;
 // possibly from the command line and possibly internal scripting if we ever
 // bind tcl or lua or something in here
 
-namespace commands {
+namespace commands
+{
   typedef std::vector< utf8 > command_id;
   class command;
   class automate;
============================================================
--- src/cycle_detector.hh	f77b04f6c75c70dbb8e99265aa3d36d0f2f244af
+++ src/cycle_detector.hh	04d5b7dfbeec25955f4abc4c0c815417529b11db
@@ -23,8 +23,8 @@ struct cycle_detector
 
   typedef std::vector< T > edge_vec;
   typedef std::vector <edge_vec > edge_map;
-  typedef std::pair <typename edge_vec::const_iterator,
-                     typename edge_vec::const_iterator> state;
+  typedef std::pair < typename edge_vec::const_iterator,
+          typename edge_vec::const_iterator > state;
   typedef std::stack <state > edge_stack;
 
   edge_map edges;
@@ -48,13 +48,13 @@ struct cycle_detector
   bool edge_makes_cycle(T const & src, T const & dst)
   {
     if (src == dst)
-        return true;
+      return true;
 
     if (dst >= edges.size() || edges.at(dst).empty())
-        return false;
+      return false;
 
     if (global_in_edges.find(src) == global_in_edges.end())
-        return false;
+      return false;
 
     while (!stk.empty())
       stk.pop();
@@ -83,7 +83,7 @@ struct cycle_detector
               }
           }
         if (!pushed)
-            stk.pop();
+          stk.pop();
       }
     return false;
   }
============================================================
--- src/database.cc	4c259f963c440fc95564dfec99b2f832f0bad643
+++ src/database.cc	fd145c90f06d2e0eb22409c786d76d53b6a4161a
@@ -129,7 +129,8 @@ namespace
       {
         I(*i >= 10 && *i < 127);
       }
-    query_param q = {
+    query_param q =
+    {
       query_param::text,
       txt,
       0,
@@ -140,7 +141,8 @@ namespace
   query_param
   blob(string const & blb)
   {
-    query_param q = {
+    query_param q =
+    {
       query_param::blob,
       blb,
       0,
@@ -151,7 +153,8 @@ namespace
   query_param
   int64(u64 const & num)
   {
-    query_param q = {
+    query_param q =
+    {
       query_param::int64,
       "",
       num,
@@ -184,7 +187,7 @@ namespace
   {
     statement() : count(0), stmt(0, sqlite3_finalize) {}
     int count;
-    cleanup_ptr<sqlite3_stmt*, int> stmt;
+    cleanup_ptr<sqlite3_stmt *, int> stmt;
   };
 
   struct roster_size_estimator
@@ -207,15 +210,16 @@ namespace
   enum open_mode { normal_mode = 0,
                    schema_bypass_mode,
                    format_bypass_mode,
-                   cache_bypass_mode };
+                   cache_bypass_mode
+                 };
 
   typedef hashmap::hash_map<revision_id, set<revision_id> > parent_id_map;
   typedef hashmap::hash_map<revision_id, rev_height> height_map;
 
-  typedef hashmap::hash_map<key_id,
-                            pair<shared_ptr<Botan::PK_Verifier>,
-                                 shared_ptr<Botan::RSA_PublicKey> >
-                            > verifier_cache;
+  typedef hashmap::hash_map < key_id,
+          pair < shared_ptr<Botan::PK_Verifier>,
+          shared_ptr<Botan::RSA_PublicKey> >
+          > verifier_cache;
 
 } // anonymous namespace
 
@@ -269,8 +273,8 @@ private:
   //
   string count(string const & table);
   string space(string const & table,
-                    string const & concatenated_columns,
-                    u64 & total);
+               string const & concatenated_columns,
+               u64 & total);
   unsigned int page_size();
   unsigned int cache_size();
 
@@ -290,9 +294,9 @@ private:
     roster_writeback_manager(database_impl & imp) : imp(imp) {}
     void writeout(revision_id const &, cached_roster const &);
   };
-  LRUWritebackCache<revision_id, cached_roster,
-                    roster_size_estimator, roster_writeback_manager>
-    roster_cache;
+  LRUWritebackCache < revision_id, cached_roster,
+                    roster_size_estimator, roster_writeback_manager >
+                    roster_cache;
 
   bool have_delayed_file(file_id const & id);
   void load_delayed_file(file_id const & id, file_data & dat);
@@ -448,7 +452,7 @@ void
 #ifdef SUPPORT_SQLITE_BEFORE_3003014
 // SQLite versions up to and including 3.3.12 didn't have the hex() function
 void
-sqlite3_hex_fn(sqlite3_context *f, int nargs, sqlite3_value **args)
+sqlite3_hex_fn(sqlite3_context * f, int nargs, sqlite3_value ** args)
 {
   if (nargs != 1)
     {
@@ -462,7 +466,7 @@ sqlite3_hex_fn(sqlite3_context *f, int n
   try
     {
       decoded = encode_hexenc(reinterpret_cast<char const *>(
-        sqlite3_value_text(args[0])), origin::database);
+                                sqlite3_value_text(args[0])), origin::database);
     }
   catch (recoverable_failure & e)
     {
@@ -537,8 +541,8 @@ database::init()
     {
       L(FL("creating new database_impl instance for %s") % dbpath);
       dbcache.insert(make_pair(dbpath, boost::shared_ptr<database_impl>(
-        new database_impl(dbpath, opts.dbname_type, opts.roster_cache_performance_log)
-      )));
+                                 new database_impl(dbpath, opts.dbname_type, opts.roster_cache_performance_log)
+                               )));
     }
 
   imp = dbcache[dbpath];
@@ -653,7 +657,7 @@ static void
 }
 
 static void
-sqlite3_gunzip_fn(sqlite3_context *f, int nargs, sqlite3_value ** args)
+sqlite3_gunzip_fn(sqlite3_context * f, int nargs, sqlite3_value ** args)
 {
   if (nargs != 1)
     {
@@ -661,9 +665,9 @@ sqlite3_gunzip_fn(sqlite3_context *f, in
       return;
     }
   data unpacked;
-  const char *val = (const char*) sqlite3_value_blob(args[0]);
+  const char * val = (const char *) sqlite3_value_blob(args[0]);
   int bytes = sqlite3_value_bytes(args[0]);
-  decode_gzip(gzip<data>(string(val,val+bytes), origin::database), unpacked);
+  decode_gzip(gzip<data>(string(val, val + bytes), origin::database), unpacked);
   sqlite3_result_blob(f, unpacked().c_str(), unpacked().size(), SQLITE_TRANSIENT);
 }
 
@@ -715,7 +719,7 @@ database::initialize()
   imp->check_db_nonexistent();
   imp->open();
 
-  sqlite3 *sql = imp->__sql;
+  sqlite3 * sql = imp->__sql;
 
   sqlite3_exec(sql, schema_constant, NULL, NULL, NULL);
   assert_sqlite3_ok(sql);
@@ -731,15 +735,15 @@ struct
 }
 
 struct
-dump_request
+  dump_request
 {
   dump_request() : sql(), out() {};
-  struct sqlite3 *sql;
-  ostream *out;
+  struct sqlite3 * sql;
+  ostream * out;
 };
 
 static void
-dump_row(ostream &out, sqlite3_stmt *stmt, string const& table_name)
+dump_row(ostream & out, sqlite3_stmt * stmt, string const & table_name)
 {
   out << FL("INSERT INTO %s VALUES(") % table_name;
   unsigned n = sqlite3_data_count(stmt);
@@ -751,20 +755,20 @@ dump_row(ostream &out, sqlite3_stmt *stm
       if (sqlite3_column_type(stmt, i) == SQLITE_BLOB)
         {
           out << "X'";
-          const char *val = (const char*) sqlite3_column_blob(stmt, i);
+          const char * val = (const char *) sqlite3_column_blob(stmt, i);
           int bytes = sqlite3_column_bytes(stmt, i);
-          out << encode_hexenc(string(val,val+bytes), origin::internal);
+          out << encode_hexenc(string(val, val + bytes), origin::internal);
           out << '\'';
         }
       else
         {
-          const unsigned char *val = sqlite3_column_text(stmt, i);
+          const unsigned char * val = sqlite3_column_text(stmt, i);
           if (val == NULL)
             out << "NULL";
           else
             {
               out << '\'';
-              for (const unsigned char *cp = val; *cp; ++cp)
+              for (const unsigned char * cp = val; *cp; ++cp)
                 {
                   if (*cp == '\'')
                     out << "''";
@@ -779,9 +783,9 @@ static int
 }
 
 static int
-dump_table_cb(void *data, int n, char **vals, char **cols)
+dump_table_cb(void * data, int n, char ** vals, char ** cols)
 {
-  dump_request *dump = reinterpret_cast<dump_request *>(data);
+  dump_request * dump = reinterpret_cast<dump_request *>(data);
   I(dump != NULL);
   I(dump->sql != NULL);
   I(vals != NULL);
@@ -793,7 +797,7 @@ dump_table_cb(void *data, int n, char **
   *(dump->out) << vals[2] << ";\n";
   string table_name(vals[0]);
   string query = "SELECT * FROM " + table_name;
-  sqlite3_stmt *stmt = 0;
+  sqlite3_stmt * stmt = 0;
   sqlite3_prepare_v2(dump->sql, query.c_str(), -1, &stmt, NULL);
   assert_sqlite3_ok(dump->sql);
 
@@ -813,9 +817,9 @@ static int
 }
 
 static int
-dump_index_cb(void *data, int n, char **vals, char **cols)
+dump_index_cb(void * data, int n, char ** vals, char ** cols)
 {
-  dump_request *dump = reinterpret_cast<dump_request *>(data);
+  dump_request * dump = reinterpret_cast<dump_request *>(data);
   I(dump != NULL);
   I(dump->sql != NULL);
   I(vals != NULL);
@@ -829,9 +833,9 @@ static int
 }
 
 static int
-dump_user_version_cb(void *data, int n, char **vals, char **cols)
+dump_user_version_cb(void * data, int n, char ** vals, char ** cols)
 {
-  dump_request *dump = reinterpret_cast<dump_request *>(data);
+  dump_request * dump = reinterpret_cast<dump_request *>(data);
   I(dump != NULL);
   I(dump->sql != NULL);
   I(vals != NULL);
@@ -854,17 +858,17 @@ database::dump(ostream & out)
     out << "BEGIN EXCLUSIVE;\n";
     int res;
     res = sqlite3_exec(req.sql,
-                          "SELECT name, type, sql FROM sqlite_master "
-                          "WHERE type='table' AND sql NOT NULL "
-                          "AND name not like 'sqlite_stat%' "
-                          "ORDER BY name",
-                          dump_table_cb, &req, NULL);
+                       "SELECT name, type, sql FROM sqlite_master "
+                       "WHERE type='table' AND sql NOT NULL "
+                       "AND name not like 'sqlite_stat%' "
+                       "ORDER BY name",
+                       dump_table_cb, &req, NULL);
     assert_sqlite3_ok(req.sql);
     res = sqlite3_exec(req.sql,
-                          "SELECT name, type, sql FROM sqlite_master "
-                          "WHERE type='index' AND sql NOT NULL "
-                          "ORDER BY name",
-                          dump_index_cb, &req, NULL);
+                       "SELECT name, type, sql FROM sqlite_master "
+                       "WHERE type='index' AND sql NOT NULL "
+                       "ORDER BY name",
+                       dump_index_cb, &req, NULL);
     assert_sqlite3_ok(req.sql);
     res = sqlite3_exec(req.sql,
                        "PRAGMA user_version;",
@@ -1030,7 +1034,7 @@ database::info(ostream & out, bool analy
     try
       {
         imp->fetch(res, one_col, any_rows,
-              query("SELECT node FROM next_roster_node_number"));
+                   query("SELECT node FROM next_roster_node_number"));
         if (res.empty())
           counts.push_back("0");
         else
@@ -1050,25 +1054,25 @@ database::info(ostream & out, bool analy
   {
     u64 total = 0;
     bytes.push_back(imp->space("rosters",
-                          "length(id) + length(checksum) + length(data)",
-                          total));
+                               "length(id) + length(checksum) + length(data)",
+                               total));
     bytes.push_back(imp->space("roster_deltas",
-                          "length(id) + length(checksum)"
-                          "+ length(base) + length(delta)", total));
+                               "length(id) + length(checksum)"
+                               "+ length(base) + length(delta)", total));
     bytes.push_back(imp->space("files", "length(id) + length(data)", total));
     bytes.push_back(imp->space("file_deltas",
-                          "length(id) + length(base) + length(delta)", total));
+                               "length(id) + length(base) + length(delta)", total));
     bytes.push_back(imp->space("file_sizes",
-                          "length(id) + length(size)", total));
+                               "length(id) + length(size)", total));
     bytes.push_back(imp->space("revisions", "length(id) + length(data)", total));
     bytes.push_back(imp->space("revision_ancestry",
-                          "length(parent) + length(child)", total));
+                               "length(parent) + length(child)", total));
     bytes.push_back(imp->space("revision_certs",
-                          "length(hash) + length(revision_id) + length(name)"
-                          "+ length(value) + length(keypair_id)"
-                          "+ length(signature)", total));
+                               "length(hash) + length(revision_id) + length(name)"
+                               "+ length(value) + length(keypair_id)"
+                               "+ length(signature)", total));
     bytes.push_back(imp->space("heights", "length(revision) + length(height)",
-                          total));
+                               total));
     bytes.push_back((F("%u") % total).str());
   }
 
@@ -1076,7 +1080,7 @@ database::info(ostream & out, bool analy
   // same length
   {
     string::size_type width
-      = max_element(counts.begin(), counts.end(), longest_number)->length();
+    = max_element(counts.begin(), counts.end(), longest_number)->length();
     for(vector<string>::iterator i = counts.begin(); i != counts.end(); i++)
       if (width > i->length() && (*i)[0] != '[')
         i->insert(0U, width - i->length(), ' ');
@@ -1114,7 +1118,7 @@ database::info(ostream & out, bool analy
       "database:\n"
       "  page size       : %s\n"
       "  cache size      : %s"
-      );
+     );
 
   form = form % format_creator_code(ccode);
   form = form % describe_sql_schema(imp->__sql);
@@ -1257,7 +1261,7 @@ database::info(ostream & out, bool analy
       "  90th percentile : %s sec\n"
       "  95th percentile : %s sec\n"
       "  99th percentile : %s sec\n"
-      );
+     );
 
   form = form % correct % equal % incorrect % root_anc % missing;
 
@@ -1267,17 +1271,17 @@ database::info(ostream & out, bool analy
   // calculate mean time difference, output that, min and max
   s64 mean = accumulate(diffs.begin(), diffs.end(), 0);
   mean /= diffs.size();
-  s64 median = *(diffs.begin() + diffs.size()/2);
+  s64 median = *(diffs.begin() + diffs.size() / 2);
   form = form % mean % *diffs.begin() % *diffs.rbegin()
-    % *(diffs.begin() + int(diffs.size() * 0.01))
-    % *(diffs.begin() + int(diffs.size() * 0.05))
-    % *(diffs.begin() + int(diffs.size() * 0.10))
-    % *(diffs.begin() + int(diffs.size() * 0.25))
-    % *(diffs.begin() + int(diffs.size() * 0.50))
-    % *(diffs.begin() + int(diffs.size() * 0.75))
-    % *(diffs.begin() + int(diffs.size() * 0.90))
-    % *(diffs.begin() + int(diffs.size() * 0.95))
-    % *(diffs.begin() + int(diffs.size() * 0.99));
+         % *(diffs.begin() + int(diffs.size() * 0.01))
+         % *(diffs.begin() + int(diffs.size() * 0.05))
+         % *(diffs.begin() + int(diffs.size() * 0.10))
+         % *(diffs.begin() + int(diffs.size() * 0.25))
+         % *(diffs.begin() + int(diffs.size() * 0.50))
+         % *(diffs.begin() + int(diffs.size() * 0.75))
+         % *(diffs.begin() + int(diffs.size() * 0.90))
+         % *(diffs.begin() + int(diffs.size() * 0.95))
+         % *(diffs.begin() + int(diffs.size() * 0.99));
 
   // output the string, with some newlines out of translation
   out << '\n' << '\n' << form.str() << '\n';
@@ -1426,9 +1430,9 @@ database_impl::fetch(results & res,
 
 void
 database_impl::fetch(results & res,
-                      int const want_cols,
-                      int const want_rows,
-                      query const & query)
+                     int const want_cols,
+                     int const want_rows,
+                     query const & query)
 {
   int nrow;
   int ncol;
@@ -1504,19 +1508,19 @@ database_impl::fetch(results & res,
                             SQLITE_STATIC);
           break;
         case query_param::blob:
-          {
-            string const & data = query.args.at(param - 1).string_data;
-            sqlite3_bind_blob(i->second.stmt(), param,
-                              data.data(), data.size(),
-                              SQLITE_STATIC);
-          }
-          break;
+        {
+          string const & data = query.args.at(param - 1).string_data;
+          sqlite3_bind_blob(i->second.stmt(), param,
+                            data.data(), data.size(),
+                            SQLITE_STATIC);
+        }
+        break;
         case query_param::int64:
-          {
-            u64 data = query.args.at(param - 1).int_data;
-            sqlite3_bind_int64(i->second.stmt(), param, data);
-          }
-          break;
+        {
+          u64 data = query.args.at(param - 1).int_data;
+          sqlite3_bind_int64(i->second.stmt(), param, data);
+        }
+        break;
         default:
           I(false);
         }
@@ -1537,15 +1541,18 @@ database_impl::fetch(results & res,
           int const datatype = sqlite3_column_type(i->second.stmt(), col);
           E(datatype != SQLITE_NULL, origin::database,
             F("null result in query: %s") % query.sql_cmd);
-          const char * value = (const char*)sqlite3_column_blob(i->second.stmt(), col);
+          const char * value = (const char *)sqlite3_column_blob(i->second.stmt(), col);
           int bytes = sqlite3_column_bytes(i->second.stmt(), col);
-          if (value) {
-            row.push_back(string(value, value + bytes));
-          } else {
-            // sqlite3_column_blob() returns null for zero-length
-            I(bytes == 0);
-            row.push_back(string());
-          }
+          if (value)
+            {
+              row.push_back(string(value, value + bytes));
+            }
+          else
+            {
+              // sqlite3_column_blob() returns null for zero-length
+              I(bytes == 0);
+              row.push_back(string());
+            }
         }
       res.push_back(row);
     }
@@ -1851,7 +1858,7 @@ database_impl::get_file_or_manifest_base
 
   gzip<data> rdata(res[0][0], origin::database);
   data rdata_unpacked;
-  decode_gzip(rdata,rdata_unpacked);
+  decode_gzip(rdata, rdata_unpacked);
 
   dat = rdata_unpacked;
 }
@@ -1927,7 +1934,7 @@ database_impl::write_delayed_file(file_i
 
 void
 database_impl::write_delayed_file(file_id const & ident,
-                                   file_data const & dat)
+                                  file_data const & dat)
 {
   gzip<data> dat_packed;
   encode_gzip(dat.inner(), dat_packed);
@@ -1946,8 +1953,8 @@ database_impl::write_delayed_roster(revi
 
 void
 database_impl::write_delayed_roster(revision_id const & ident,
-                                     roster_t const & roster,
-                                     marking_map const & marking)
+                                    roster_t const & roster,
+                                    marking_map const & marking)
 {
   roster_data dat;
   write_roster_and_marking(roster, marking, dat);
@@ -2000,8 +2007,8 @@ database_impl::put_roster_delta(revision
 
 void
 database_impl::put_roster_delta(revision_id const & ident,
-                                 revision_id const & base,
-                                 roster_delta const & del)
+                                revision_id const & base,
+                                roster_delta const & del)
 {
   gzip<delta> del_packed;
   encode_gzip(del.inner(), del_packed);
@@ -2031,7 +2038,7 @@ struct file_and_manifest_reconstruction_
   virtual bool is_base(id const & node) const
   {
     return imp.vcache.exists(node)
-      || imp.file_or_manifest_base_exists(node, data_table);
+           || imp.file_or_manifest_base_exists(node, data_table);
   }
   virtual void get_next(id const & from, set<id> & next) const
   {
@@ -2228,7 +2235,7 @@ database_impl::extract_from_deltas(revis
           if (found)
             return;
         }
-      if (i == path_length-1)
+      if (i == path_length - 1)
         {
           // last iteration, we have reached a roster base
           roster_t roster;
@@ -2344,7 +2351,7 @@ database::file_version_exists(file_id co
 database::file_version_exists(file_id const & id)
 {
   return delta_exists(id.inner(), "file_deltas")
-    || imp->file_or_manifest_base_exists(id.inner(), "files");
+         || imp->file_or_manifest_base_exists(id.inner(), "files");
 }
 
 bool
@@ -2357,7 +2364,7 @@ database::roster_version_exists(revision
 database::roster_version_exists(revision_id const & id)
 {
   return delta_exists(id.inner(), "roster_deltas")
-    || imp->roster_base_available(id);
+         || imp->roster_base_available(id);
 }
 
 bool
@@ -2450,32 +2457,32 @@ database::get_file_sizes(roster_t const 
       results res;
 
       size_t variables = all_file_ids.size() - i > max_variables
-        ? max_variables
-        : all_file_ids.size() - i;
+                         ? max_variables
+                         : all_file_ids.size() - i;
       I(variables > 0);
 
       query q;
       string placeholders = "";
-      for (size_t j=i; j< i + variables; ++j)
+      for (size_t j = i; j < i + variables; ++j)
         {
           placeholders += "?,";
           q.args.push_back(blob(all_file_ids[j].inner()()));
         }
 
       q.sql_cmd = "SELECT id, size FROM file_sizes "
-                  "WHERE id IN(" + placeholders +"null)";
+                  "WHERE id IN(" + placeholders + "null)";
 
       imp->fetch(res, 2, any_rows, q);
       I(!res.empty());
 
-      for (size_t k=0; k<res.size(); ++k)
+      for (size_t k = 0; k < res.size(); ++k)
         {
           file_id ident(res[k][0], origin::database);
           u64 size = lexical_cast<u64>(res[k][1]);
           sizes.insert(make_pair(ident, size));
         }
 
-      i+= variables;
+      i += variables;
     }
 }
 
@@ -2703,7 +2710,7 @@ database::get_revision_children(revision
   children.clear();
   imp->fetch(res, one_col, any_rows,
              query("SELECT child FROM revision_ancestry WHERE parent = ?")
-        % blob(id.inner()()));
+             % blob(id.inner()()));
   for (size_t i = 0; i < res.size(); ++i)
     children.insert(revision_id(res[i][0], origin::database));
 }
@@ -2725,7 +2732,7 @@ database::get_revision_manifest(revision
 
 void
 database::get_revision_manifest(revision_id const & rid,
-                               manifest_id & mid)
+                                manifest_id & mid)
 {
   revision_t rev;
   get_revision(rid, rev);
@@ -2770,8 +2777,8 @@ database::get_common_ancestors(std::set<
         {
           set<revision_id> common;
           set_intersection(ancestors.begin(), ancestors.end(),
-                         all_common_ancestors.begin(), all_common_ancestors.end(),
-                         inserter(common, common.begin()));
+                           all_common_ancestors.begin(), all_common_ancestors.end(),
+                           inserter(common, common.begin()));
           all_common_ancestors = common;
         }
     }
@@ -2853,7 +2860,7 @@ database::get_revision(revision_id const
   I(res.size() == 1);
   gzip<data> gzdata(res[0][0], origin::database);
   data rdat;
-  decode_gzip(gzdata,rdat);
+  decode_gzip(gzdata, rdat);
 
   // verify that we got a revision with the right id
   {
@@ -2938,7 +2945,7 @@ database::deltify_revision(revision_id c
          i != rev.edges.end(); ++i)
       {
         for (map<file_path, pair<file_id, file_id> >::const_iterator
-               j = edge_changes(i).deltas_applied.begin();
+             j = edge_changes(i).deltas_applied.begin();
              j != edge_changes(i).deltas_applied.end(); ++j)
           {
             file_id old_id(delta_entry_src(j));
@@ -2999,7 +3006,7 @@ database::put_revision(revision_id const
         }
 
       for (map<file_path, file_id>::const_iterator a
-             = edge_changes(i).files_added.begin();
+           = edge_changes(i).files_added.begin();
            a != edge_changes(i).files_added.end(); ++a)
         {
           if (! file_version_exists(a->second))
@@ -3011,7 +3018,7 @@ database::put_revision(revision_id const
         }
 
       for (map<file_path, pair<file_id, file_id> >::const_iterator d
-             = edge_changes(i).deltas_applied.begin();
+           = edge_changes(i).deltas_applied.begin();
            d != edge_changes(i).deltas_applied.end(); ++d)
         {
           I(!delta_entry_src(d).inner()().empty());
@@ -3101,9 +3108,9 @@ database::put_height_for_revision(revisi
       rev_height parent; MM(parent);
       get_rev_height(edge_old_revision(e), parent);
       if (parent > highest_parent)
-      {
-        highest_parent = parent;
-      }
+        {
+          highest_parent = parent;
+        }
     }
 
   // ... then find the first unused child
@@ -3431,24 +3438,24 @@ database::encrypt_rsa(key_id const & pub
 
   shared_ptr<X509_PublicKey> x509_key(Botan::X509::load_key(pub_block));
   shared_ptr<RSA_PublicKey> pub_key
-    = shared_dynamic_cast<RSA_PublicKey>(x509_key);
+  = shared_dynamic_cast<RSA_PublicKey>(x509_key);
   if (!pub_key)
     throw recoverable_failure(origin::system,
                               "Failed to get RSA encrypting key");
 
   shared_ptr<PK_Encryptor>
-    encryptor(get_pk_encryptor(*pub_key, "EME1(SHA-1)"));
+  encryptor(get_pk_encryptor(*pub_key, "EME1(SHA-1)"));
 
   SecureVector<Botan::byte> ct;
 
 #if BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,7,7)
   ct = encryptor->encrypt(
-          reinterpret_cast<Botan::byte const *>(plaintext.data()),
-          plaintext.size(), lazy_rng::get());
+         reinterpret_cast<Botan::byte const *>(plaintext.data()),
+         plaintext.size(), lazy_rng::get());
 #else
   ct = encryptor->encrypt(
-          reinterpret_cast<Botan::byte const *>(plaintext.data()),
-          plaintext.size());
+         reinterpret_cast<Botan::byte const *>(plaintext.data()),
+         plaintext.size());
 #endif
   ciphertext = rsa_oaep_sha_data(string(reinterpret_cast<char const *>(ct.begin()),
                                         ct.size()),
@@ -3483,7 +3490,7 @@ database::check_signature(key_id const &
       L(FL("building verifier for %d-byte pub key") % pub_block.size());
       shared_ptr<X509_PublicKey> x509_key(Botan::X509::load_key(pub_block));
       shared_ptr<RSA_PublicKey> pub_key
-        = boost::shared_dynamic_cast<RSA_PublicKey>(x509_key);
+      = boost::shared_dynamic_cast<RSA_PublicKey>(x509_key);
 
       E(pub_key, id.inner().made_from,
         F("failed to get RSA verifying key for %s") % id);
@@ -3501,9 +3508,9 @@ database::check_signature(key_id const &
   L(FL("checking %d-byte signature") % signature().size());
 
   if (verifier->verify_message(
-        reinterpret_cast<Botan::byte const*>(alleged_text.data()),
+        reinterpret_cast<Botan::byte const *>(alleged_text.data()),
         alleged_text.size(),
-        reinterpret_cast<Botan::byte const*>(signature().data()),
+        reinterpret_cast<Botan::byte const *>(signature().data()),
         signature().size()))
     return cert_ok;
   else
@@ -3530,11 +3537,11 @@ database_impl::cert_exists(cert const & 
                   "AND value = ? "
                   "AND keypair_id = ? "
                   "AND signature = ?")
-    % blob(t.ident.inner()())
-    % text(t.name())
-    % blob(t.value())
-    % blob(t.key.inner()())
-    % blob(t.sig());
+            % blob(t.ident.inner()())
+            % text(t.name())
+            % blob(t.value())
+            % blob(t.key.inner()())
+            % blob(t.sig());
 
   fetch(res, 1, any_rows, q);
 
@@ -3646,9 +3653,9 @@ database_impl::install_functions()
 
   // register any functions we're going to use
   I(sqlite3_create_function(sql(), "gunzip", -1,
-                           SQLITE_UTF8, NULL,
-                           &sqlite3_gunzip_fn,
-                           NULL, NULL) == 0);
+                            SQLITE_UTF8, NULL,
+                            &sqlite3_gunzip_fn,
+                            NULL, NULL) == 0);
 }
 
 void
@@ -3727,7 +3734,7 @@ database_impl::get_certs(id const & iden
 
   fetch(res, 5, any_rows,
         q % blob(ident())
-          % text(name()));
+        % text(name()));
   results_to_certs(res, certs);
 }
 
@@ -3743,7 +3750,7 @@ database_impl::get_certs(cert_name const
 
   fetch(res, 6, any_rows,
         q % text(name())
-          % blob(val()));
+        % blob(val()));
   results_to_certs(res, certs);
 }
 
@@ -3761,8 +3768,8 @@ database_impl::get_certs(id const & iden
 
   fetch(res, 5, any_rows,
         q % blob(ident())
-          % text(name())
-          % blob(value()));
+        % text(name())
+        % blob(value()));
   results_to_certs(res, certs);
 }
 
@@ -3899,7 +3906,7 @@ outdated_indicator
 }
 
 outdated_indicator
-database::get_revision_cert_nobranch_index(vector< pair<revision_id,
+database::get_revision_cert_nobranch_index(vector < pair < revision_id,
                                            pair<revision_id, key_id> > > & idx)
 {
   // share some storage
@@ -3930,7 +3937,7 @@ database::get_revision_certs(cert_name c
 
 outdated_indicator
 database::get_revision_certs(cert_name const & name,
-                            vector<cert> & certs)
+                             vector<cert> & certs)
 {
   imp->get_certs(name, certs, "revision_certs");
   return imp->cert_stamper.get_indicator();
@@ -4047,7 +4054,8 @@ database::revision_cert_exists(revision_
 
 // FIXME: the bogus-cert family of functions is ridiculous
 // and needs to be replaced, or at least factored.
-namespace {
+namespace
+{
   struct trust_value
   {
     set<key_id> good_sigs;
@@ -4226,10 +4234,10 @@ database_impl::add_prefix_matching_const
   else
     {
       for (string::const_iterator i = prefix.begin(); i != prefix.end(); ++i)
-       {
-         E(is_xdigit(*i), origin::user,
-           F("bad character '%c' in id name '%s'") % *i % prefix);
-       }
+        {
+          E(is_xdigit(*i), origin::user,
+            F("bad character '%c' in id name '%s'") % *i % prefix);
+        }
 
       string lower_hex = prefix;
       if (lower_hex.size() < constants::idlen)
@@ -4509,8 +4517,8 @@ database::get_var(var_key const & key, v
   imp->fetch(res, one_col, any_rows,
              query("SELECT value FROM db_vars "
                    "WHERE domain = ? AND name = ?")
-                   % text(key.first())
-                   % blob(key.second()));
+             % text(key.first())
+             % blob(key.second()));
   I(res.size() == 1);
   var_value dbvalue(res[0][0], origin::database);
   value = dbvalue;
@@ -4525,8 +4533,8 @@ database::var_exists(var_key const & key
                    "WHERE EXISTS("
                    "  SELECT 1 FROM db_vars "
                    "  WHERE domain = ? AND name = ?)")
-                   % text(key.first())
-                   % blob(key.second()));
+             % text(key.first())
+             % blob(key.second()));
   return ! res.empty();
 }
 
@@ -4643,31 +4651,31 @@ database::get_branches(vector<string> & 
 outdated_indicator
 database::get_branches(vector<string> & names)
 {
-    results res;
-    query q("SELECT DISTINCT branch FROM branch_leaves");
-    string cert_name = "branch";
-    imp->fetch(res, one_col, any_rows, q);
-    for (size_t i = 0; i < res.size(); ++i)
-      {
-        names.push_back(res[i][0]);
-      }
-    return imp->cert_stamper.get_indicator();
+  results res;
+  query q("SELECT DISTINCT branch FROM branch_leaves");
+  string cert_name = "branch";
+  imp->fetch(res, one_col, any_rows, q);
+  for (size_t i = 0; i < res.size(); ++i)
+    {
+      names.push_back(res[i][0]);
+    }
+  return imp->cert_stamper.get_indicator();
 }
 
 outdated_indicator
 database::get_branches(globish const & glob,
                        vector<string> & names)
 {
-    results res;
-    query q("SELECT DISTINCT value FROM revision_certs WHERE name = ?");
-    string cert_name = "branch";
-    imp->fetch(res, one_col, any_rows, q % text(cert_name));
-    for (size_t i = 0; i < res.size(); ++i)
-      {
-        if (glob.matches(res[i][0]))
-          names.push_back(res[i][0]);
-      }
-    return imp->cert_stamper.get_indicator();
+  results res;
+  query q("SELECT DISTINCT value FROM revision_certs WHERE name = ?");
+  string cert_name = "branch";
+  imp->fetch(res, one_col, any_rows, q % text(cert_name));
+  for (size_t i = 0; i < res.size(); ++i)
+    {
+      if (glob.matches(res[i][0]))
+        names.push_back(res[i][0]);
+    }
+  return imp->cert_stamper.get_indicator();
 }
 
 void
@@ -4978,7 +4986,7 @@ database_path_helper::get_database_path(
     origin::user, F("no default database location configured"));
 
   for (vector<system_path>::const_iterator i = search_paths.begin();
-     i != search_paths.end(); ++i)
+       i != search_paths.end(); ++i)
     {
       if (file_exists((*i) / basename))
         {
@@ -4995,7 +5003,7 @@ database_path_helper::get_database_path(
     {
       path = (*search_paths.begin()) / basename;
       L(FL("no path expansions found for '%s', using '%s'")
-          % opts.dbname_alias % path);
+        % opts.dbname_alias % path);
       return;
     }
 
@@ -5003,7 +5011,7 @@ database_path_helper::get_database_path(
     {
       path = (*candidates.begin());
       L(FL("one path expansion found for '%s': '%s'")
-          % opts.dbname_alias % path);
+        % opts.dbname_alias % path);
       return;
     }
 
@@ -5025,8 +5033,8 @@ database_path_helper::maybe_set_default_
 database_path_helper::maybe_set_default_alias(options & opts)
 {
   if (opts.dbname_given && (
-       !opts.dbname.as_internal().empty() ||
-       !opts.dbname_alias.empty()))
+        !opts.dbname.as_internal().empty() ||
+        !opts.dbname_alias.empty()))
     {
       return;
     }
============================================================
--- src/database.hh	66798497f77bc1010b7e5850d2c406eaf1010b2e
+++ src/database.hh	38b1b0f245c71d20e896fa05660203b0060c0fc3
@@ -182,7 +182,7 @@ public:
   void get_reverse_ancestry(rev_ancestry_map & graph);
 
   void get_revision_parents(revision_id const & ident,
-                           std::set<revision_id> & parents);
+                            std::set<revision_id> & parents);
 
   void get_revision_children(revision_id const & ident,
                              std::set<revision_id> & children);
@@ -302,18 +302,18 @@ public:
   void record_as_branch_leaf(cert_value const & branch, revision_id const & rev);
 
   // this variant has to be rather coarse and fast, for netsync's use
-  outdated_indicator get_revision_cert_nobranch_index(std::vector< std::pair<revision_id,
-                              std::pair<revision_id, key_id> > > & idx);
+  outdated_indicator get_revision_cert_nobranch_index(std::vector < std::pair < revision_id,
+                                                      std::pair<revision_id, key_id> > > & idx);
 
   // Only used by database_check.cc
   outdated_indicator get_revision_certs(std::vector<cert> & certs);
 
   outdated_indicator get_revision_certs(cert_name const & name,
-                          std::vector<cert> & certs);
+                                        std::vector<cert> & certs);
 
   outdated_indicator get_revision_certs(revision_id const & ident,
-                          cert_name const & name,
-                          std::vector<cert> & certs);
+                                        cert_name const & name,
+                                        std::vector<cert> & certs);
 
   // Only used by get_branch_certs (project.cc)
   outdated_indicator get_revision_certs(cert_name const & name,
@@ -322,14 +322,14 @@ public:
 
   // Only used by revision_is_in_branch (project.cc)
   outdated_indicator get_revision_certs(revision_id const & ident,
-                          cert_name const & name,
-                          cert_value const & value,
-                          std::vector<cert> & certs);
+                                        cert_name const & name,
+                                        cert_value const & value,
+                                        std::vector<cert> & certs);
 
   // Only used by get_branch_heads (project.cc)
   outdated_indicator get_revisions_with_cert(cert_name const & name,
-                               cert_value const & value,
-                               std::set<revision_id> & revisions);
+                                             cert_value const & value,
+                                             std::set<revision_id> & revisions);
 
   // Used by get_branch_heads (project.cc)
   // Will also be needed by daggy-refinement, if/when implemented
@@ -343,18 +343,18 @@ public:
 
   // Used through project.cc
   outdated_indicator get_revision_certs(revision_id const & ident,
-                          std::vector<cert> & certs);
+                                        std::vector<cert> & certs);
 
   // Used through get_revision_cert_hashes (project.cc)
   outdated_indicator get_revision_certs(revision_id const & ident,
-                          std::vector<id> & hashes);
+                                        std::vector<id> & hashes);
 
   void get_revision_cert(id const & hash, cert & c);
 
-  typedef boost::function<bool(std::set<key_id> const &,
-                               id const &,
-                               cert_name const &,
-                               cert_value const &)> cert_trust_checker;
+  typedef boost::function < bool(std::set<key_id> const &,
+                                 id const &,
+                                 cert_name const &,
+                                 cert_value const &) > cert_trust_checker;
   // this takes a project_t so it can translate key names for the trust hook
   void erase_bogus_certs(project_t const & project, std::vector<cert> & certs);
   // permit alternative trust functions
@@ -402,8 +402,8 @@ public:
   //
 public:
   void prefix_matching_constraint(std::string const & colname,
-                                   std::string const & prefix,
-                                   std::string & constraint);
+                                  std::string const & prefix,
+                                  std::string & constraint);
 
   void complete(std::string const & partial,
                 std::set<revision_id> & completions);
@@ -574,9 +574,9 @@ public:
   bool acquired;
   bool const exclusive;
 public:
-  conditional_transaction_guard(database & db, bool exclusive=true,
-                                size_t checkpoint_batch_size=1000,
-                                size_t checkpoint_batch_bytes=0xfffff)
+  conditional_transaction_guard(database & db, bool exclusive = true,
+                                size_t checkpoint_batch_size = 1000,
+                                size_t checkpoint_batch_bytes = 0xfffff)
     : db(db),
       checkpoint_batch_size(checkpoint_batch_size),
       checkpoint_batch_bytes(checkpoint_batch_bytes),
@@ -595,9 +595,9 @@ public:
 class transaction_guard : public conditional_transaction_guard
 {
 public:
-  transaction_guard(database & d, bool exclusive=true,
-                    size_t checkpoint_batch_size=1000,
-                    size_t checkpoint_batch_bytes=0xfffff)
+  transaction_guard(database & d, bool exclusive = true,
+                    size_t checkpoint_batch_size = 1000,
+                    size_t checkpoint_batch_bytes = 0xfffff)
     : conditional_transaction_guard(d, exclusive, checkpoint_batch_size,
                                     checkpoint_batch_bytes)
   {
============================================================
--- src/diff_output.cc	8df7c0bec2e64275f6c8e8b6ccb035c8fd0f684f
+++ src/diff_output.cc	9d30ea1f245cb9850a43fe16c841054e5e890605
@@ -136,9 +136,9 @@ void walk_hunk_consumer(vector<long, QA(
             continue;
 
           cons.advance_to(a);
-          while (idx(lines1,a) != *i)
-              cons.delete_at(a++);
-          while (idx(lines2,b) != *i)
+          while (idx(lines1, a) != *i)
+            cons.delete_at(a++);
+          while (idx(lines2, b) != *i)
             cons.insert_at(b++);
         }
       if (a < lines1.size())
@@ -171,7 +171,7 @@ struct unidiff_hunk_writer : public hunk
                       size_t ctx,
                       ostream & ost,
                       string const & encloser_pattern)
-  : hunk_consumer(a, b, ctx, ost, encloser_pattern)
+    : hunk_consumer(a, b, ctx, ost, encloser_pattern)
   {}
 };
 
@@ -205,7 +205,7 @@ void unidiff_hunk_writer::flush_hunk(siz
         ost << "@@ -0,0";
       else
         {
-          ost << "@@ -" << a_begin+1;
+          ost << "@@ -" << a_begin + 1;
           if (a_len > 1)
             ost << ',' << a_len;
         }
@@ -214,7 +214,7 @@ void unidiff_hunk_writer::flush_hunk(siz
         ost << " +0,0";
       else
         {
-          ost << " +" << b_begin+1;
+          ost << " +" << b_begin + 1;
           if (b_len > 1)
             ost << ',' << b_len;
         }
@@ -298,8 +298,8 @@ struct cxtdiff_hunk_writer : public hunk
                       size_t ctx,
                       ostream & ost,
                       string const & encloser_pattern)
-  : hunk_consumer(a, b, ctx, ost, encloser_pattern),
-    have_insertions(false), have_deletions(false)
+    : hunk_consumer(a, b, ctx, ost, encloser_pattern),
+      have_insertions(false), have_deletions(false)
   {}
 };
 
@@ -504,7 +504,7 @@ make_diff(string const & filename1,
        i != lines2.end(); ++i)
     right_interned.push_back(in.intern(*i));
 
-  lcs.reserve(min(lines1.size(),lines2.size()));
+  lcs.reserve(min(lines1.size(), lines2.size()));
   longest_common_subsequence(left_interned.begin(), left_interned.end(),
                              right_interned.begin(), right_interned.end(),
                              back_inserter(lcs));
@@ -567,35 +567,35 @@ make_diff(string const & filename1,
   //      if the new file name is "/dev/null", nothing else.
   switch (type)
     {
-      case unified_diff:
-      {
-        ost << "--- " << filename1 << '\t'
-            << id1 << '\n';
-        ost << "+++ " << filename2 << '\t'
-            << id2 << '\n';
+    case unified_diff:
+    {
+      ost << "--- " << filename1 << '\t'
+          << id1 << '\n';
+      ost << "+++ " << filename2 << '\t'
+          << id2 << '\n';
 
-        unidiff_hunk_writer hunks(lines1, lines2, 3, ost, pattern);
-        walk_hunk_consumer(lcs, left_interned, right_interned, hunks);
-        break;
-      }
-      case context_diff:
-      {
-        ost << "*** " << filename1 << '\t'
-            << id1 << '\n';
-        ost << "--- " << filename2 << '\t'
-            << id2 << '\n';
+      unidiff_hunk_writer hunks(lines1, lines2, 3, ost, pattern);
+      walk_hunk_consumer(lcs, left_interned, right_interned, hunks);
+      break;
+    }
+    case context_diff:
+    {
+      ost << "*** " << filename1 << '\t'
+          << id1 << '\n';
+      ost << "--- " << filename2 << '\t'
+          << id2 << '\n';
 
-        cxtdiff_hunk_writer hunks(lines1, lines2, 3, ost, pattern);
-        walk_hunk_consumer(lcs, left_interned, right_interned, hunks);
-        break;
-      }
-      default:
-      {
-        // should never reach this; the external_diff type is not
-        // handled by this function.
-        I(false);
-      }
+      cxtdiff_hunk_writer hunks(lines1, lines2, 3, ost, pattern);
+      walk_hunk_consumer(lcs, left_interned, right_interned, hunks);
+      break;
     }
+    default:
+    {
+      // should never reach this; the external_diff type is not
+      // handled by this function.
+      I(false);
+    }
+    }
 }
 
 
============================================================
--- src/file_io.cc	76874e5ca4978bd0fd3ba183cd19edc3447ece0d
+++ src/file_io.cc	7536c178477a2afff169c60634392f832948b269
@@ -122,11 +122,14 @@ directory_empty(any_path const & path)
   };
 
   directory_empty_helper h;
-  try {
-    read_directory(path, h, h, h);
-  } catch (directory_not_empty_exception) {
-    return false;
-  }
+  try
+    {
+      read_directory(path, h, h, h);
+    }
+  catch (directory_not_empty_exception)
+    {
+      return false;
+    }
   return true;
 }
 
@@ -149,8 +152,9 @@ bool guess_binary(string const & s)
 
 bool guess_binary(string const & s)
 {
-  static const bool char_is_binary[256] = {
-  //_0 _1 _2 _3 _4 _5 _6 _7 _8 _9 _A _B _C _D _E _F
+  static const bool char_is_binary[256] =
+  {
+    //_0 _1 _2 _3 _4 _5 _6 _7 _8 _9 _A _B _C _D _E _F
     1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, // 0_
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1_
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2_
@@ -465,7 +469,7 @@ calculate_ident(file_path const & file,
 {
   // no conversions necessary, use streaming form
   static cached_botan_pipe
-    p(new Botan::Pipe(new Botan::Hash_Filter("SHA-160")));
+  p(new Botan::Pipe(new Botan::Hash_Filter("SHA-160")));
 
   // Best to be safe and check it isn't a dir.
   assert_path_is_file(file);
============================================================
--- src/file_io.hh	1ecc0d6d6f9dddeb676ac95ca074c2dfb82e58d0
+++ src/file_io.hh	ee0eb7ff39d28ff626a9095fa5e88a064035dcba
@@ -123,7 +123,7 @@ public:
   ~directory_cleanup_helper()
   {
     if (!committed && directory_exists(dir))
-       {
+      {
         // This is probably happening in the middle of another exception.
         // Do not let anything that delete_dir_recursive throws escape, or
         // the runtime will call std::terminate...
============================================================
--- src/interner.hh	c300647e6937a8a68f33fbbcd647e6fd9455a87f
+++ src/interner.hh	9ff590310d8c5fae9da5acd1b510788c223a9da7
@@ -16,7 +16,7 @@ struct
 
 template <typename T>
 struct
-interner
+  interner
 {
   typedef typename hashmap::hash_map<std::string, T> hmap;
 
============================================================
--- src/keys.cc	5e83c66959453e7454ea99b5a403c41ab4982461
+++ src/keys.cc	c5ed270951bef96b89e720da1d1c6ee859fcf93f
@@ -63,7 +63,8 @@ load_key_pair(key_store & keys,
   keys.get_key_pair(id, name, kp);
 }
 
-namespace {
+namespace
+{
   void check_and_save_chosen_key(database & db,
                                  key_store & keys,
                                  key_id const & chosen_key)
============================================================
--- src/lua_hooks.cc	1c178085332c73dcefb4681d205d17b059e52080
+++ src/lua_hooks.cc	9649d1ff6898e68754148ec117c9528a11475a21
@@ -49,14 +49,14 @@ static int panic_thrower(lua_State * st)
 // the command line (and so known only to the app_state), and still be
 // available to lua
 // please *don't* use it for complex things that can throw errors
-static map<lua_State*, app_state*> map_of_lua_to_app;
+static map<lua_State *, app_state *> map_of_lua_to_app;
 
 extern "C"
 {
   static int
-  monotone_get_confdir_for_lua(lua_State *LS)
+  monotone_get_confdir_for_lua(lua_State * LS)
   {
-    map<lua_State*, app_state*>::iterator i = map_of_lua_to_app.find(LS);
+    map<lua_State *, app_state *>::iterator i = map_of_lua_to_app.find(LS);
     if (i != map_of_lua_to_app.end())
       {
         if (i->second->opts.conf_dir_given
@@ -75,7 +75,7 @@ extern "C"
   }
   // taken from http://medek.wordpress.com/2009/02/03/wrapping-lua-errors-and-print-function/
   static int
-  monotone_message(lua_State *LS)
+  monotone_message(lua_State * LS)
   {
     int nArgs = lua_gettop(LS);
     lua_getglobal(LS, "tostring");
@@ -83,15 +83,15 @@ extern "C"
     string ret;
     for (int i = 1; i <= nArgs; ++i)
       {
-        const char *s;
+        const char * s;
         lua_pushvalue(LS, -1);
         lua_pushvalue(LS, i);
         lua_call(LS, 1, 1);
         s = lua_tostring(LS, -1);
         if (s == NULL)
           return luaL_error(
-            LS, LUA_QL("tostring") " must return a string to ", LUA_QL("print")
-          );
+                   LS, LUA_QL("tostring") " must return a string to ", LUA_QL("print")
+                 );
 
         if (i > 1)
           ret.append("\t");
@@ -108,28 +108,29 @@ app_state*
 }
 
 app_state*
-get_app_state(lua_State *LS)
+get_app_state(lua_State * LS)
 {
-  map<lua_State*, app_state*>::iterator i = map_of_lua_to_app.find(LS);
+  map<lua_State *, app_state *>::iterator i = map_of_lua_to_app.find(LS);
   if (i != map_of_lua_to_app.end())
     return i->second;
   else
     return NULL;
 }
 
-namespace {
+namespace
+{
   Lua & push_key_identity_info(Lua & ll,
                                key_identity_info const & info)
   {
     hexenc<id> hexid;
     encode_hexenc(info.id.inner(), hexid);
     ll.push_table()
-      .push_str(hexid())
-      .set_field("id")
-      .push_str(info.given_name())
-      .set_field("given_name")
-      .push_str(info.official_name())
-      .set_field("name");
+    .push_str(hexid())
+    .set_field("id")
+    .push_str(info.given_name())
+    .set_field("given_name")
+    .push_str(info.official_name())
+    .set_field("name");
     return ll;
   }
 }
@@ -157,8 +158,8 @@ lua_hooks::lua_hooks(app_state * app)
     " error(\"io.popen disabled for security reasons.  Try spawn_pipe().\") "
     "end ";
 
-    if (!run_string(st, disable_dangerous,
-                    "<disabled dangerous functions>"))
+  if (!run_string(st, disable_dangerous,
+                  "<disabled dangerous functions>"))
     throw oops("lua error while disabling existing functions");
 
   // redirect output to internal message handler which calls into
@@ -172,8 +173,8 @@ lua_hooks::lua_hooks(app_state * app)
     "  message(...) "
     "end ";
 
-    if (!run_string(st, redirect_output,
-                    "<redirect output>"))
+  if (!run_string(st, redirect_output,
+                  "<redirect output>"))
     throw oops("lua error while redirecting output");
 
   map_of_lua_to_app.insert(make_pair(st, app));
@@ -181,7 +182,7 @@ lua_hooks::~lua_hooks()
 
 lua_hooks::~lua_hooks()
 {
-  map<lua_State*, app_state*>::iterator i = map_of_lua_to_app.find(st);
+  map<lua_State *, app_state *>::iterator i = map_of_lua_to_app.find(st);
   if (st)
     lua_close (st);
   if (i != map_of_lua_to_app.end())
@@ -282,8 +283,8 @@ lua_hooks::hook_exists(string const & fu
 lua_hooks::hook_exists(string const & func_name)
 {
   return Lua(st)
-    .func(func_name)
-    .ok();
+         .func(func_name)
+         .ok();
 }
 
 // concrete hooks
@@ -297,8 +298,8 @@ lua_hooks::hook_get_passphrase(key_ident
   Lua ll(st);
   ll.func("get_passphrase");
   push_key_identity_info(ll, identity);
-  ll.call(1,1)
-    .extract_classified_str(phrase);
+  ll.call(1, 1)
+  .extract_classified_str(phrase);
   return ll.ok();
 }
 
@@ -310,7 +311,7 @@ lua_hooks::hook_get_local_key_name(key_i
   ll.func("get_local_key_name");
   push_key_identity_info(ll, info);
   ll.call(1, 1)
-    .extract_str(local_name);
+  .extract_str(local_name);
   if (ll.ok())
     {
       info.official_name = key_name(local_name, origin::user);
@@ -325,10 +326,10 @@ lua_hooks::hook_persist_phrase_ok()
 {
   bool persist_ok = false;
   bool executed_ok = Lua(st)
-    .func("persist_phrase_ok")
-    .call(0,1)
-    .extract_bool(persist_ok)
-    .ok();
+                     .func("persist_phrase_ok")
+                     .call(0, 1)
+                     .extract_bool(persist_ok)
+                     .ok();
   return executed_ok && persist_ok;
 }
 
@@ -337,11 +338,11 @@ lua_hooks::hook_expand_selector(string c
                                 string & exp)
 {
   return Lua(st)
-    .func("expand_selector")
-    .push_str(sel)
-    .call(1,1)
-    .extract_str(exp)
-    .ok();
+         .func("expand_selector")
+         .push_str(sel)
+         .call(1, 1)
+         .extract_str(exp)
+         .ok();
 }
 
 bool
@@ -349,12 +350,12 @@ lua_hooks::hook_expand_date(string const
                             string & exp)
 {
   exp.clear();
-  bool res= Lua(st)
-    .func("expand_date")
-    .push_str(sel)
-    .call(1,1)
-    .extract_str(exp)
-    .ok();
+  bool res = Lua(st)
+             .func("expand_date")
+             .push_str(sel)
+             .call(1, 1)
+             .extract_str(exp)
+             .ok();
   return res && exp.size();
 }
 
@@ -366,11 +367,11 @@ lua_hooks::hook_get_branch_key(branch_na
 {
   string key;
   bool ok = Lua(st)
-    .func("get_branch_key")
-    .push_str(branchname())
-    .call(1,1)
-    .extract_str(key)
-    .ok();
+            .func("get_branch_key")
+            .push_str(branchname())
+            .call(1, 1)
+            .extract_str(key)
+            .ok();
 
   if (!ok || key.empty())
     return false;
@@ -390,11 +391,11 @@ lua_hooks::hook_get_author(branch_name c
 {
   Lua ll(st);
   ll.func("get_author")
-    .push_str(branchname());
+  .push_str(branchname());
   push_key_identity_info(ll, identity);
-  return ll.call(2,1)
-    .extract_str(author)
-    .ok();
+  return ll.call(2, 1)
+         .extract_str(author)
+         .ok();
 }
 
 bool
@@ -403,11 +404,11 @@ lua_hooks::hook_edit_comment(external co
 {
   string result_str;
   bool is_ok = Lua(st)
-                 .func("edit_comment")
-                 .push_str(user_log_message())
-                 .call(1,1)
-                 .extract_str(result_str)
-                 .ok();
+               .func("edit_comment")
+               .push_str(user_log_message())
+               .call(1, 1)
+               .extract_str(result_str)
+               .ok();
   result = external(result_str, origin::user);
   return is_ok;
 }
@@ -417,11 +418,11 @@ lua_hooks::hook_ignore_file(file_path co
 {
   bool ignore_it = false;
   bool exec_ok = Lua(st)
-    .func("ignore_file")
-    .push_str(p.as_external())
-    .call(1,1)
-    .extract_bool(ignore_it)
-    .ok();
+                 .func("ignore_file")
+                 .push_str(p.as_external())
+                 .call(1, 1)
+                 .extract_bool(ignore_it)
+                 .ok();
   return exec_ok && ignore_it;
 }
 
@@ -430,15 +431,16 @@ lua_hooks::hook_ignore_branch(branch_nam
 {
   bool ignore_it = false;
   bool exec_ok = Lua(st)
-    .func("ignore_branch")
-    .push_str(branch())
-    .call(1,1)
-    .extract_bool(ignore_it)
-    .ok();
+                 .func("ignore_branch")
+                 .push_str(branch())
+                 .call(1, 1)
+                 .extract_bool(ignore_it)
+                 .ok();
   return exec_ok && ignore_it;
 }
 
-namespace {
+namespace
+{
   template<typename ID>
   Lua & push_key_ident(Lua & ll, ID const & ident)
   {
@@ -478,12 +480,12 @@ namespace {
     hexenc<id> hid(encode_hexenc(hash(), hash.made_from), hash.made_from);
     bool ok;
     bool exec_ok = ll
-      .push_str(hid())
-      .push_str(name())
-      .push_str(val())
-      .call(4, 1)
-      .extract_bool(ok)
-      .ok();
+                   .push_str(hid())
+                   .push_str(name())
+                   .push_str(val())
+                   .call(4, 1)
+                   .extract_bool(ok)
+                   .ok();
 
     return exec_ok && ok;
   }
@@ -491,9 +493,9 @@ lua_hooks::hook_get_revision_cert_trust(
 
 bool
 lua_hooks::hook_get_revision_cert_trust(std::set<key_identity_info> const & signers,
-                                       id const & hash,
-                                       cert_name const & name,
-                                       cert_value const & val)
+                                        id const & hash,
+                                        cert_name const & name,
+                                        cert_value const & val)
 {
   Lua ll(st);
   ll.func("get_revision_cert_trust");
@@ -517,8 +519,8 @@ lua_hooks::hook_accept_testresult_change
 {
   Lua ll(st);
   ll
-    .func("accept_testresult_change")
-    .push_table();
+  .func("accept_testresult_change")
+  .push_table();
 
   for (map<key_id, bool>::const_iterator i = old_results.begin();
        i != old_results.end(); ++i)
@@ -540,9 +542,9 @@ lua_hooks::hook_accept_testresult_change
 
   bool ok;
   bool exec_ok = ll
-    .call(2, 1)
-    .extract_bool(ok)
-    .ok();
+                 .call(2, 1)
+                 .extract_bool(ok)
+                 .ok();
 
   return exec_ok && ok;
 }
@@ -561,17 +563,17 @@ lua_hooks::hook_merge3(file_path const &
 {
   string res;
   bool ok = Lua(st)
-    .func("merge3")
-    .push_str(anc_path.as_external())
-    .push_str(left_path.as_external())
-    .push_str(right_path.as_external())
-    .push_str(merged_path.as_external())
-    .push_str(ancestor())
-    .push_str(left())
-    .push_str(right())
-    .call(7,1)
-    .extract_str(res)
-    .ok();
+            .func("merge3")
+            .push_str(anc_path.as_external())
+            .push_str(left_path.as_external())
+            .push_str(right_path.as_external())
+            .push_str(merged_path.as_external())
+            .push_str(ancestor())
+            .push_str(left())
+            .push_str(right())
+            .call(7, 1)
+            .extract_str(res)
+            .ok();
   result = data(res, origin::user);
   return ok;
 }
@@ -589,8 +591,8 @@ lua_hooks::hook_external_diff(file_path 
   Lua ll(st);
 
   ll
-    .func("external_diff")
-    .push_str(path.as_external());
+  .func("external_diff")
+  .push_str(path.as_external());
 
   if (oldrev.length() != 0)
     ll.push_str(data_old());
@@ -609,7 +611,7 @@ lua_hooks::hook_external_diff(file_path 
   ll.push_str(oldrev);
   ll.push_str(newrev);
 
-  return ll.call(7,0).ok();
+  return ll.call(7, 0).ok();
 }
 
 bool
@@ -617,7 +619,7 @@ lua_hooks::hook_get_encloser_pattern(fil
                                      string & pattern)
 {
   bool exec_ok
-    = Lua(st)
+  = Lua(st)
     .func("get_encloser_pattern")
     .push_str(path.as_external())
     .call(1, 1)
@@ -671,7 +673,7 @@ lua_hooks::hook_get_date_format_spec(dat
 {
   string in_spec;
   switch (in)
-  {
+    {
     case date_long:         in_spec = "date_long"; break;
     case date_short:        in_spec = "date_short"; break;
     case time_long:         in_spec = "time_long"; break;
@@ -679,10 +681,10 @@ lua_hooks::hook_get_date_format_spec(dat
     case date_time_long:    in_spec = "date_time_long"; break;
     case date_time_short:   in_spec = "date_time_short"; break;
     default: I(false);
-  }
+    }
 
   bool exec_ok
-    = Lua(st)
+  = Lua(st)
     .func("get_date_format_spec")
     .push_str(in_spec)
     .call(1, 1)
@@ -697,12 +699,12 @@ bool lua_hooks::hook_get_default_databas
 
 bool lua_hooks::hook_get_default_database_alias(string & alias)
 {
-   bool exec_ok
-     = Lua(st)
-     .func("get_default_database_alias")
-     .call(0, 1)
-     .extract_str(alias)
-     .ok();
+  bool exec_ok
+  = Lua(st)
+    .func("get_default_database_alias")
+    .call(0, 1)
+    .extract_str(alias)
+    .ok();
 
   return exec_ok;
 }
@@ -725,13 +727,13 @@ bool lua_hooks::hook_get_default_databas
 
 bool lua_hooks::hook_get_default_database_glob(globish & out)
 {
-   string glob;
-   bool exec_ok
-     = Lua(st)
-     .func("get_default_database_glob")
-     .call(0, 1)
-     .extract_str(glob)
-     .ok();
+  string glob;
+  bool exec_ok
+  = Lua(st)
+    .func("get_default_database_glob")
+    .call(0, 1)
+    .extract_str(glob)
+    .ok();
 
   out = globish(glob, origin::user);
   return exec_ok;
@@ -743,10 +745,10 @@ bool lua_hooks::hook_hook_wrapper(string
 {
   Lua ll(st);
   ll.func("hook_wrapper")
-    .push_str(func_name);
+  .push_str(func_name);
 
   for (vector<string>::const_iterator i = args.begin();
-        i != args.end(); ++i)
+       i != args.end(); ++i)
     {
       ll.push_str(*i);
     }
@@ -760,11 +762,11 @@ lua_hooks::hook_get_man_page_formatter_c
 lua_hooks::hook_get_man_page_formatter_command(string & command)
 {
   bool exec_ok
-     = Lua(st)
-     .func("get_man_page_formatter_command")
-     .call(0, 1)
-     .extract_str(command)
-     .ok();
+  = Lua(st)
+    .func("get_man_page_formatter_command")
+    .call(0, 1)
+    .extract_str(command)
+    .ok();
 
   return exec_ok;
 }
@@ -775,10 +777,10 @@ lua_hooks::hook_use_inodeprints()
   bool use = false, exec_ok = false;
 
   exec_ok = Lua(st)
-    .func("use_inodeprints")
-    .call(0, 1)
-    .extract_bool(use)
-    .ok();
+            .func("use_inodeprints")
+            .call(0, 1)
+            .extract_bool(use)
+            .ok();
   return use && exec_ok;
 }
 
@@ -792,7 +794,7 @@ lua_hooks::hook_get_netsync_client_key(u
 {
   string name;
   bool exec_ok
-    = Lua(st)
+  = Lua(st)
     .func("get_netsync_client_key")
     .push_str(server_address())
     .push_str(include())
@@ -833,8 +835,8 @@ lua_hooks::hook_get_netsync_server_key(v
     }
 
   bool exec_ok = ll.call(1, 1)
-                   .extract_str(name)
-                   .ok();
+                 .extract_str(name)
+                 .ok();
 
   if (!exec_ok || name.empty())
     return false;
@@ -938,7 +940,7 @@ lua_hooks::hook_get_netsync_connect_comm
       ll.set_table();
     }
 
-  ll.call(2,1);
+  ll.call(2, 1);
 
   ll.begin();
 
@@ -960,7 +962,7 @@ lua_hooks::hook_use_transport_auth(uri_t
   Lua ll(st);
   ll.func("use_transport_auth");
   push_uri(uri, ll);
-  ll.call(1,1);
+  ll.call(1, 1);
   ll.extract_bool(use_auth);
 
   // NB: we want to return *true* here if there's a failure.
@@ -976,11 +978,11 @@ lua_hooks::hook_get_netsync_read_permitt
 
   Lua ll(st);
   ll.func("get_netsync_read_permitted")
-    .push_str(branch);
+  .push_str(branch);
   push_key_identity_info(ll, identity);
-  exec_ok = ll.call(2,1)
-    .extract_bool(permitted)
-    .ok();
+  exec_ok = ll.call(2, 1)
+            .extract_bool(permitted)
+            .ok();
 
   return exec_ok && permitted;
 }
@@ -992,12 +994,12 @@ lua_hooks::hook_get_netsync_read_permitt
   bool permitted = false, exec_ok = false;
 
   exec_ok = Lua(st)
-    .func("get_netsync_read_permitted")
-    .push_str(branch)
-    .push_nil()
-    .call(2,1)
-    .extract_bool(permitted)
-    .ok();
+            .func("get_netsync_read_permitted")
+            .push_str(branch)
+            .push_nil()
+            .call(2, 1)
+            .extract_bool(permitted)
+            .ok();
 
   return exec_ok && permitted;
 }
@@ -1010,9 +1012,9 @@ lua_hooks::hook_get_netsync_write_permit
   Lua ll(st);
   ll.func("get_netsync_write_permitted");
   push_key_identity_info(ll, identity);
-  exec_ok = ll.call(1,1)
-    .extract_bool(permitted)
-    .ok();
+  exec_ok = ll.call(1, 1)
+            .extract_bool(permitted)
+            .ok();
 
   return exec_ok && permitted;
 }
@@ -1072,8 +1074,8 @@ lua_hooks::hook_init_attributes(file_pat
   Lua ll(st);
 
   ll
-    .push_str("attr_init_functions")
-    .get_tab();
+  .push_str("attr_init_functions")
+  .get_tab();
 
   L(FL("calling attr_init_function for %s") % filename);
   ll.begin();
@@ -1110,14 +1112,14 @@ lua_hooks::hook_set_attribute(string con
                               string const & value)
 {
   return Lua(st)
-    .push_str("attr_functions")
-    .get_tab()
-    .push_str(attr)
-    .get_fn(-2)
-    .push_str(filename.as_external())
-    .push_str(value)
-    .call(2,0)
-    .ok();
+         .push_str("attr_functions")
+         .get_tab()
+         .push_str(attr)
+         .get_fn(-2)
+         .push_str(filename.as_external())
+         .push_str(value)
+         .call(2, 0)
+         .ok();
 }
 
 bool
@@ -1125,14 +1127,14 @@ lua_hooks::hook_clear_attribute(string c
                                 file_path const & filename)
 {
   return Lua(st)
-    .push_str("attr_functions")
-    .get_tab()
-    .push_str(attr)
-    .get_fn(-2)
-    .push_str(filename.as_external())
-    .push_nil()
-    .call(2,0)
-    .ok();
+         .push_str("attr_functions")
+         .get_tab()
+         .push_str(attr)
+         .get_fn(-2)
+         .push_str(filename.as_external())
+         .push_nil()
+         .call(2, 0)
+         .ok();
 }
 
 bool
@@ -1143,15 +1145,15 @@ lua_hooks::hook_validate_changes(revisio
 {
   validated = true;
   return Lua(st)
-    .func("validate_changes")
-    .push_str(new_rev.inner()())
-    .push_str(branchname())
-    .call(2, 2)
-    .extract_str(reason)
-    // XXX When validated, the extra returned string is superfluous.
-    .pop()
-    .extract_bool(validated)
-    .ok();
+         .func("validate_changes")
+         .push_str(new_rev.inner()())
+         .push_str(branchname())
+         .call(2, 2)
+         .extract_str(reason)
+         // XXX When validated, the extra returned string is superfluous.
+         .pop()
+         .extract_bool(validated)
+         .ok();
 }
 
 bool
@@ -1163,16 +1165,16 @@ lua_hooks::hook_validate_commit_message(
 {
   validated = true;
   return Lua(st)
-    .func("validate_commit_message")
-    .push_str(message())
-    .push_str(new_rev.inner()())
-    .push_str(branchname())
-    .call(3, 2)
-    .extract_str(reason)
-    // XXX When validated, the extra returned string is superfluous.
-    .pop()
-    .extract_bool(validated)
-    .ok();
+         .func("validate_commit_message")
+         .push_str(message())
+         .push_str(new_rev.inner()())
+         .push_str(branchname())
+         .call(3, 2)
+         .extract_str(reason)
+         // XXX When validated, the extra returned string is superfluous.
+         .pop()
+         .extract_bool(validated)
+         .ok();
 }
 
 bool
@@ -1182,9 +1184,9 @@ lua_hooks::hook_note_commit(revision_id 
 {
   Lua ll(st);
   ll
-    .func("note_commit")
-    .push_str(encode_hexenc(new_id.inner()(), new_id.inner().made_from))
-    .push_str(rdat.inner()());
+  .func("note_commit")
+  .push_str(encode_hexenc(new_id.inner()(), new_id.inner().made_from))
+  .push_str(rdat.inner()());
 
   ll.push_table();
 
@@ -1225,30 +1227,30 @@ lua_hooks::hook_note_netsync_start(size_
     }
   Lua ll(st);
   ll.func("note_netsync_start")
-    .push_int(session_id)
-    .push_str(my_role)
-    .push_str(type)
-    .push_str(remote_host);
+  .push_int(session_id)
+  .push_str(my_role)
+  .push_str(type)
+  .push_str(remote_host);
   push_key_identity_info(ll, remote_key);
   return ll.push_str(include_pattern())
-    .push_str(exclude_pattern())
-    .call(7, 0)
-    .ok();
+         .push_str(exclude_pattern())
+         .call(7, 0)
+         .ok();
 }
 
 bool
 lua_hooks::hook_note_netsync_revision_received(revision_id const & new_id,
                                                revision_data const & rdat,
-                                               std::set<pair<key_identity_info,
-                                               pair<cert_name,
-                                               cert_value> > > const & certs,
+                                               std::set < pair < key_identity_info,
+                                               pair < cert_name,
+                                               cert_value > > > const & certs,
                                                size_t session_id)
 {
   Lua ll(st);
   ll
-    .func("note_netsync_revision_received")
-    .push_str(encode_hexenc(new_id.inner()(), new_id.inner().made_from))
-    .push_str(rdat.inner()());
+  .func("note_netsync_revision_received")
+  .push_str(encode_hexenc(new_id.inner()(), new_id.inner().made_from))
+  .push_str(rdat.inner()());
 
   ll.push_table();
 
@@ -1276,16 +1278,16 @@ lua_hooks::hook_note_netsync_revision_se
 bool
 lua_hooks::hook_note_netsync_revision_sent(revision_id const & new_id,
                                            revision_data const & rdat,
-                                           std::set<pair<key_identity_info,
-                                           pair<cert_name,
-                                           cert_value> > > const & certs,
+                                           std::set < pair < key_identity_info,
+                                           pair < cert_name,
+                                           cert_value > > > const & certs,
                                            size_t session_id)
 {
   Lua ll(st);
   ll
-    .func("note_netsync_revision_sent")
-    .push_str(encode_hexenc(new_id.inner()(), new_id.inner().made_from))
-    .push_str(rdat.inner()());
+  .func("note_netsync_revision_sent")
+  .push_str(encode_hexenc(new_id.inner()(), new_id.inner().made_from))
+  .push_str(rdat.inner()());
 
   ll.push_table();
 
@@ -1316,7 +1318,7 @@ lua_hooks::hook_note_netsync_pubkey_rece
 {
   Lua ll(st);
   ll
-    .func("note_netsync_pubkey_received");
+  .func("note_netsync_pubkey_received");
   push_key_identity_info(ll, identity);
   ll.push_int(session_id);
 
@@ -1330,7 +1332,7 @@ lua_hooks::hook_note_netsync_pubkey_sent
 {
   Lua ll(st);
   ll
-    .func("note_netsync_pubkey_sent");
+  .func("note_netsync_pubkey_sent");
   push_key_identity_info(ll, identity);
   ll.push_int(session_id);
 
@@ -1347,12 +1349,12 @@ lua_hooks::hook_note_netsync_cert_receiv
 {
   Lua ll(st);
   ll
-    .func("note_netsync_cert_received")
-    .push_str(encode_hexenc(rid.inner()(), rid.inner().made_from));
+  .func("note_netsync_cert_received")
+  .push_str(encode_hexenc(rid.inner()(), rid.inner().made_from));
   push_key_identity_info(ll, identity);
   ll.push_str(name())
-    .push_str(value())
-    .push_int(session_id);
+  .push_str(value())
+  .push_int(session_id);
 
   ll.call(5, 0);
   return ll.ok();
@@ -1367,12 +1369,12 @@ lua_hooks::hook_note_netsync_cert_sent(r
 {
   Lua ll(st);
   ll
-    .func("note_netsync_cert_sent")
-    .push_str(encode_hexenc(rid.inner()(), rid.inner().made_from));
+  .func("note_netsync_cert_sent")
+  .push_str(encode_hexenc(rid.inner()(), rid.inner().made_from));
   push_key_identity_info(ll, identity);
   ll.push_str(name())
-    .push_str(value())
-    .push_int(session_id);
+  .push_str(value())
+  .push_int(session_id);
 
   ll.call(5, 0);
   return ll.ok();
@@ -1387,19 +1389,19 @@ lua_hooks::hook_note_netsync_end(size_t 
 {
   Lua ll(st);
   return ll
-    .func("note_netsync_end")
-    .push_int(session_id)
-    .push_int(status)
-    .push_int(bytes_in)
-    .push_int(bytes_out)
-    .push_int(certs_in)
-    .push_int(certs_out)
-    .push_int(revs_in)
-    .push_int(revs_out)
-    .push_int(keys_in)
-    .push_int(keys_out)
-    .call(10, 0)
-    .ok();
+         .func("note_netsync_end")
+         .push_int(session_id)
+         .push_int(status)
+         .push_int(bytes_in)
+         .push_int(bytes_out)
+         .push_int(certs_in)
+         .push_int(certs_out)
+         .push_int(revs_in)
+         .push_int(revs_out)
+         .push_int(keys_in)
+         .push_int(keys_out)
+         .call(10, 0)
+         .ok();
 }
 
 bool
@@ -1420,11 +1422,11 @@ lua_hooks::hook_unmapped_git_author(stri
 lua_hooks::hook_unmapped_git_author(string const & unmapped_author, string & fixed_author)
 {
   return Lua(st)
-    .func("unmapped_git_author")
-    .push_str(unmapped_author)
-    .call(1,1)
-    .extract_str(fixed_author)
-    .ok();
+         .func("unmapped_git_author")
+         .push_str(unmapped_author)
+         .call(1, 1)
+         .extract_str(fixed_author)
+         .ok();
 }
 
 bool
@@ -1432,11 +1434,11 @@ lua_hooks::hook_validate_git_author(stri
 {
   bool valid = false, exec_ok = false;
   exec_ok = Lua(st)
-    .func("validate_git_author")
-    .push_str(author)
-    .call(1,1)
-    .extract_bool(valid)
-    .ok();
+            .func("validate_git_author")
+            .push_str(author)
+            .call(1, 1)
+            .extract_bool(valid)
+            .ok();
   return valid && exec_ok;
 }
 
============================================================
--- src/lua_hooks.hh	66412b9fa5db97cd3b3ec01cadf036fb346ac161
+++ src/lua_hooks.hh	dab3af10dfb9901cdfefe51e4ffc5164ae04acfa
@@ -36,7 +36,7 @@ struct key_identity_info;
 class project_t;
 struct key_identity_info;
 
-extern app_state* get_app_state(lua_State *LS);
+extern app_state * get_app_state(lua_State * LS);
 
 class lua_hooks
 {
@@ -69,9 +69,9 @@ public:
                          external & result);
   bool hook_persist_phrase_ok();
   bool hook_get_revision_cert_trust(std::set<key_identity_info> const & signers,
-                                   id const & hash,
-                                   cert_name const & name,
-                                   cert_value const & val);
+                                    id const & hash,
+                                    cert_name const & name,
+                                    cert_value const & val);
   bool hook_get_manifest_cert_trust(std::set<key_name> const & signers,
                                     id const & hash,
                                     cert_name const & name,
@@ -187,15 +187,15 @@ public:
                                globish exclude_pattern);
   bool hook_note_netsync_revision_received(revision_id const & new_id,
                                            revision_data const & rdat,
-                                           std::set<pair<key_identity_info,
-                                           pair<cert_name,
-                                           cert_value> > > const & certs,
+                                           std::set < pair < key_identity_info,
+                                           pair < cert_name,
+                                           cert_value > > > const & certs,
                                            size_t session_id);
   bool hook_note_netsync_revision_sent(revision_id const & new_id,
                                        revision_data const & rdat,
-                                       std::set<pair<key_identity_info,
-                                       pair<cert_name,
-                                       cert_value> > > const & certs,
+                                       std::set < pair < key_identity_info,
+                                       pair < cert_name,
+                                       cert_value > > > const & certs,
                                        size_t session_id);
   bool hook_note_netsync_pubkey_received(key_identity_info const & identity,
                                          size_t session_id);
============================================================
--- src/monotone.cc	a25ecdd170a601db798b63a60add7681609a34c4
+++ src/monotone.cc	26270104ceb434f9f4606645542f9777693067d3
@@ -118,7 +118,8 @@ cpp_main(int argc, char ** argv)
       unfiltered_pipe = new Botan::Pipe;
       new (unfiltered_pipe_cleanup_mem) cached_botan_pipe(unfiltered_pipe);
 
-      class _DbCacheEmptier {
+      class _DbCacheEmptier
+      {
       public:
         _DbCacheEmptier() { }
         ~_DbCacheEmptier() { database::reset_cache(); }
@@ -148,32 +149,32 @@ cpp_main(int argc, char ** argv)
 
       // check the botan library version we got linked against.
       u32 linked_botan_version = BOTAN_VERSION_CODE_FOR(
-        Botan::version_major(), Botan::version_minor(),
-        Botan::version_patch());
+                                   Botan::version_major(), Botan::version_minor(),
+                                   Botan::version_patch());
 
       // Botan 1.7.14 has an incompatible API change, which got reverted
       // again in 1.7.15. Thus we do not care to support 1.7.14.
-      E(linked_botan_version != BOTAN_VERSION_CODE_FOR(1,7,14), origin::system,
+      E(linked_botan_version != BOTAN_VERSION_CODE_FOR(1, 7, 14), origin::system,
         F("monotone does not support Botan 1.7.14"));
 
 #if BOTAN_VERSION_CODE <= BOTAN_VERSION_CODE_FOR(1,7,6)
-      E(linked_botan_version >= BOTAN_VERSION_CODE_FOR(1,6,3), origin::system,
+      E(linked_botan_version >= BOTAN_VERSION_CODE_FOR(1, 6, 3), origin::system,
         F("this monotone binary requires Botan 1.6.3 or newer"));
-      E(linked_botan_version <= BOTAN_VERSION_CODE_FOR(1,7,6), origin::system,
+      E(linked_botan_version <= BOTAN_VERSION_CODE_FOR(1, 7, 6), origin::system,
         F("this monotone binary does not work with Botan newer than 1.7.6"));
 #elif BOTAN_VERSION_CODE <= BOTAN_VERSION_CODE_FOR(1,7,22)
-      E(linked_botan_version > BOTAN_VERSION_CODE_FOR(1,7,6), origin::system,
+      E(linked_botan_version > BOTAN_VERSION_CODE_FOR(1, 7, 6), origin::system,
         F("this monotone binary requires Botan 1.7.7 or newer"));
       // While compiling against 1.7.22 or newer is recommended, because
       // it enables new features of Botan, the monotone binary compiled
       // against Botan 1.7.21 and before should still work with newer Botan
       // versions, including all of the stable branch 1.8.x.
-      E(linked_botan_version < BOTAN_VERSION_CODE_FOR(1,9,0), origin::system,
+      E(linked_botan_version < BOTAN_VERSION_CODE_FOR(1, 9, 0), origin::system,
         F("this monotone binary does not work with Botan 1.9.x"));
 #else
-      E(linked_botan_version > BOTAN_VERSION_CODE_FOR(1,7,22), origin::system,
+      E(linked_botan_version > BOTAN_VERSION_CODE_FOR(1, 7, 22), origin::system,
         F("this monotone binary requires Botan 1.7.22 or newer"));
-      E(linked_botan_version < BOTAN_VERSION_CODE_FOR(1,9,0), origin::system,
+      E(linked_botan_version < BOTAN_VERSION_CODE_FOR(1, 9, 0), origin::system,
         F("this monotone binary does not work with Botan 1.9.x"));
 #endif
 
@@ -185,8 +186,8 @@ cpp_main(int argc, char ** argv)
           app.reset_info.cmdline_args = args;
 
           options::opts::all_options().instantiate(&app.opts)
-            .from_command_line(app.reset_info.cmdline_args,
-                               option::concrete_option_set::preparse);
+          .from_command_line(app.reset_info.cmdline_args,
+                             option::concrete_option_set::preparse);
 
           if (app.opts.version_given)
             {
============================================================
--- src/packet.cc	f61360ed2524fdf53411bd24d022a2a3c9e6e9c7
+++ src/packet.cc	a49c13b27b42b100628d5badf57302a53eb3cc4e
@@ -118,7 +118,7 @@ namespace
 namespace
 {
   struct
-  feed_packet_consumer : public origin_aware
+    feed_packet_consumer : public origin_aware
   {
     size_t & count;
     packet_consumer & cons;
@@ -166,7 +166,7 @@ namespace
         {
           E(false, origin::user,
             F("malformed packet: invalid public key data for '%s': %s")
-              % name % e.what());
+            % name % e.what());
         }
     }
     void validate_private_key_data(string const & name, string const & keydata) const
@@ -185,7 +185,7 @@ namespace
         {
           E(false, origin::user,
             F("malformed packet: invalid private key data for '%s': %s")
-              % name % e.what());
+            % name % e.what());
         }
       // since we do not want to prompt for a password to decode it finally,
       // we ignore all other exceptions
@@ -234,14 +234,14 @@ namespace
       validate_base64(body);
 
       id src_hash(decode_hexenc_as<id>(src_id, made_from)),
-        dst_hash(decode_hexenc_as<id>(dst_id, made_from));
+         dst_hash(decode_hexenc_as<id>(dst_id, made_from));
       delta contents;
       unpack(base64<gzip<delta> >(body, made_from), contents);
       cons.consume_file_delta(file_id(src_hash),
                               file_id(dst_hash),
                               file_delta(contents));
     }
-    static void read_rest(istream& in, string& dest)
+    static void read_rest(istream & in, string & dest)
     {
 
       while (true)
@@ -260,7 +260,7 @@ namespace
       string name;   iss >> name;   validate_certname(name);
       string keyid;  iss >> keyid;  validate_id(keyid);
       string val;
-      read_rest(iss,val);           validate_arg_base64(val);
+      read_rest(iss, val);           validate_arg_base64(val);
 
       revision_id hash(decode_hexenc_as<revision_id>(certid, made_from));
       validate_base64(body);
@@ -290,7 +290,7 @@ namespace
       L(FL("read keypair packet"));
       string::size_type hashpos = body.find('#');
       string pub(body, 0, hashpos);
-      string priv(body, hashpos+1);
+      string priv(body, hashpos + 1);
 
       validate_key(args);
       validate_base64(pub);
@@ -348,7 +348,8 @@ extract_packets(string const & s, packet
 
   string::const_iterator p, tbeg, tend, abeg, aend, bbeg, bend;
 
-  enum extract_state {
+  enum extract_state
+  {
     skipping, open_bracket, scanning_type, found_type,
     scanning_args, found_args, scanning_body,
     end_1, end_2, end_3, end_4, end_5
@@ -417,7 +418,7 @@ static size_t
 // this is same as rfind, but search area is haystack[start:] (from start to end of string)
 // haystack is searched, needle is pattern
 static size_t
-rfind_in_substr(std::string const& haystack, size_t start, std::string const& needle)
+rfind_in_substr(std::string const & haystack, size_t start, std::string const & needle)
 {
   I(start <= haystack.size());
   const std::string::const_iterator result =
@@ -441,7 +442,7 @@ read_packets(istream & in, packet_consum
   while(in)
     {
       size_t const next_search_pos = (accum.size() >= end.size())
-                                      ? accum.size() - end.size() : 0;
+                                     ? accum.size() - end.size() : 0;
       in.read(buf, bufsz);
       accum.append(buf, in.gcount());
       string::size_type endpos = string::npos;
@@ -452,7 +453,7 @@ read_packets(istream & in, packet_consum
           string tmp = accum.substr(0, endpos);
           count += extract_packets(tmp, cons);
           if (endpos < accum.size() - 1)
-            accum = accum.substr(endpos+1);
+            accum = accum.substr(endpos + 1);
           else
             accum.clear();
         }
============================================================
--- src/rcs_file.cc	885b3fbe7b6cfed78816f0e57cd71d44616213c6
+++ src/rcs_file.cc	db458f88c1523182b66edb6491361404a254abb6
@@ -42,7 +42,7 @@ struct
 
 #ifdef HAVE_MMAP
 struct
-file_handle
+  file_handle
 {
   string const & filename;
   off_t length;
@@ -51,20 +51,20 @@ file_handle
     filename(fn),
     length(0),
     fd(-1)
-    {
-      struct stat st;
-      if (stat(fn.c_str(), &st) == -1)
-        throw oops("stat of " + filename + " failed");
-      length = st.st_size;
-      fd = open(filename.c_str(), O_RDONLY);
-      if (fd == -1)
-        throw oops("open of " + filename + " failed");
-    }
+  {
+    struct stat st;
+    if (stat(fn.c_str(), &st) == -1)
+      throw oops("stat of " + filename + " failed");
+    length = st.st_size;
+    fd = open(filename.c_str(), O_RDONLY);
+    if (fd == -1)
+      throw oops("open of " + filename + " failed");
+  }
   ~file_handle()
-    {
-      if (close(fd) == -1)
-        throw oops("close of " + filename + " failed");
-    }
+  {
+    if (close(fd) == -1)
+      throw oops("close of " + filename + " failed");
+  }
 };
 struct file_source
 {
@@ -112,7 +112,7 @@ struct
 };
 #elif defined(WIN32)
 struct
-file_handle
+  file_handle
 {
   string const & filename;
   off_t length;
@@ -121,31 +121,31 @@ file_handle
     filename(fn),
     length(0),
     fd(NULL)
-    {
-      struct stat st;
-      if (stat(fn.c_str(), &st) == -1)
-        throw oops("stat of " + filename + " failed");
-      length = st.st_size;
-      fd = CreateFile(fn.c_str(),
-                      GENERIC_READ,
-                      FILE_SHARE_READ,
-                      NULL,
-                      OPEN_EXISTING, 0, NULL);
-      if (fd == NULL)
-        throw oops("open of " + filename + " failed");
-    }
+  {
+    struct stat st;
+    if (stat(fn.c_str(), &st) == -1)
+      throw oops("stat of " + filename + " failed");
+    length = st.st_size;
+    fd = CreateFile(fn.c_str(),
+                    GENERIC_READ,
+                    FILE_SHARE_READ,
+                    NULL,
+                    OPEN_EXISTING, 0, NULL);
+    if (fd == NULL)
+      throw oops("open of " + filename + " failed");
+  }
   ~file_handle()
-    {
-      if (CloseHandle(fd)==0)
-        throw oops("close of " + filename + " failed");
-    }
+  {
+    if (CloseHandle(fd) == 0)
+      throw oops("close of " + filename + " failed");
+  }
 };
 
 struct
-file_source
+  file_source
 {
   string const & filename;
-  HANDLE fd,map;
+  HANDLE fd, map;
   off_t length;
   off_t pos;
   void * mapping;
@@ -177,17 +177,17 @@ file_source
     mapping(NULL)
   {
     map = CreateFileMapping(fd, NULL, PAGE_READONLY, 0, 0, NULL);
-    if (map==NULL)
+    if (map == NULL)
       throw oops("CreateFileMapping of " + filename + " failed");
     mapping = MapViewOfFile(map, FILE_MAP_READ, 0, 0, len);
-    if (mapping==NULL)
+    if (mapping == NULL)
       throw oops("MapViewOfFile of " + filename + " failed");
   }
   ~file_source()
   {
-    if (UnmapViewOfFile(mapping)==0)
+    if (UnmapViewOfFile(mapping) == 0)
       throw oops("UnmapViewOfFile of " + filename + " failed");
-    if (CloseHandle(map)==0)
+    if (CloseHandle(map) == 0)
       throw oops("CloseHandle of " + filename + " failed");
   }
 };
@@ -197,14 +197,14 @@ typedef enum
 #endif
 
 typedef enum
-  {
-    TOK_STRING,
-    TOK_SYMBOL,
-    TOK_NUM,
-    TOK_SEMI,
-    TOK_COLON,
-    TOK_NONE
-  }
+{
+  TOK_STRING,
+  TOK_SYMBOL,
+  TOK_NUM,
+  TOK_SEMI,
+  TOK_COLON,
+  TOK_NONE
+}
 token_type;
 
 static inline void
@@ -489,14 +489,14 @@ parse_rcs_file(string const & filename, 
 parse_rcs_file(string const & filename, rcs_file & r)
 {
 #if defined(HAVE_MMAP) || defined(WIN32)
-      file_handle handle(filename);
-      file_source ifs(filename, handle.fd, handle.length);
+  file_handle handle(filename);
+  file_source ifs(filename, handle.fd, handle.length);
 #else
-      ifstream ifs(filename.c_str());
-      ifs.unsetf(ios_base::skipws);
+  ifstream ifs(filename.c_str());
+  ifs.unsetf(ios_base::skipws);
 #endif
-      parser p(ifs, r);
-      p.parse_file();
+  parser p(ifs, r);
+  p.parse_file();
 }
 
 
============================================================
--- src/rcs_file.hh	23731152c2a5ac9eb8b6573eec50cd00a8522f54
+++ src/rcs_file.hh	cce84c91f063e91f836d310a7a9f6fd8e2534405
@@ -46,7 +46,7 @@ struct rcs_file
   void push_delta(rcs_delta const & d)
   {
     boost::shared_ptr<rcs_delta> dp(new rcs_delta(d));
-    deltas.insert(make_pair(dp->num,dp));
+    deltas.insert(make_pair(dp->num, dp));
   }
   void push_deltatext(rcs_deltatext const & dt)
   {
============================================================
--- src/rcs_import.cc	c2d6d57e4896d8c3e53821e7619933029c83f7e7
+++ src/rcs_import.cc	9b8acf7000b274e7de7aab2cdc8e999bf31b53e6
@@ -72,7 +72,7 @@ struct
 struct cvs_history;
 
 struct
-cvs_commit
+  cvs_commit
 {
   cvs_commit(rcs_file const & r,
              string const & rcs_version,
@@ -95,7 +95,7 @@ struct
 };
 
 struct
-cvs_branch
+  cvs_branch
 {
   bool has_a_branchpoint;
   bool has_a_commit;
@@ -158,7 +158,7 @@ struct
 };
 
 struct
-cvs_history
+  cvs_history
 {
 
   interner<unsigned long> branch_interner;
@@ -263,12 +263,12 @@ cvs_commit::cvs_commit(rcs_file const & 
     I(strptime(dp, "%Y.%m.%d.%H.%M.%S", &t) != NULL);
 #else
   I(sscanf(dp, "%d.%d.%d.%d.%d.%d", &(t.tm_year), &(t.tm_mon),
-           &(t.tm_mday), &(t.tm_hour), &(t.tm_min), &(t.tm_sec))==6);
+           &(t.tm_mday), &(t.tm_hour), &(t.tm_min), &(t.tm_sec)) == 6);
   t.tm_mon--;
   // Apparently some RCS files have 2 digit years, others four; tm always
   // wants a 2 (or 3) digit year (years since 1900).
   if (t.tm_year > 1900)
-    t.tm_year-=1900;
+    t.tm_year -= 1900;
 #endif
   time = mktime(&t);
   L(FL("= %i") % time);
@@ -285,8 +285,8 @@ cvs_commit::cvs_commit(rcs_file const & 
   path = cvs.curr_file_interned;
   version = cvs.file_version_interner.intern(ident.inner()());
 
-  typedef multimap<string,string>::const_iterator ity;
-  pair<ity,ity> range = r.admin.symbols.equal_range(rcs_version);
+  typedef multimap<string, string>::const_iterator ity;
+  pair<ity, ity> range = r.admin.symbols.equal_range(rcs_version);
   for (ity i = range.first; i != range.second; ++i)
     {
       if (i->first == rcs_version)
@@ -304,7 +304,7 @@ struct
 struct piece;
 
 struct
-piece_store
+  piece_store
 {
   vector< shared_ptr<rcs_deltatext> > texts;
   void index_deltatext(shared_ptr<rcs_deltatext> const & dt,
@@ -321,7 +321,7 @@ struct
 
 
 struct
-piece
+  piece
 {
   piece(string::size_type p, string::size_type l, unsigned long id) :
     pos(p), len(l), string_id(id) {}
@@ -387,7 +387,7 @@ process_one_hunk(vector< piece > const &
       char code;
       int pos, len;
       if (sscanf(directive.c_str(), " %c %d %d", &code, &pos, &len) != 3)
-              throw oops("illformed directive '" + directive + "'");
+        throw oops("illformed directive '" + directive + "'");
 
       if (code == 'a')
         {
@@ -589,21 +589,21 @@ process_branch(database & db,
       string next_version = r.deltas.find(curr_version)->second->next;
 
       if (! next_version.empty())
-      {
-         L(FL("following RCS edge %s -> %s") % curr_version % next_version);
+        {
+          L(FL("following RCS edge %s -> %s") % curr_version % next_version);
 
-         construct_version(*curr_lines, next_version, *next_lines, r);
-         L(FL("constructed RCS version %s, inserting into database") %
-           next_version);
+          construct_version(*curr_lines, next_version, *next_lines, r);
+          L(FL("constructed RCS version %s, inserting into database") %
+            next_version);
 
-         insert_into_db(db, curr_data, curr_id,
-                        *next_lines, next_data, next_id);
-      }
+          insert_into_db(db, curr_data, curr_id,
+                         *next_lines, next_data, next_id);
+        }
 
       // mark the beginning-of-branch time and state of this file if
       // we're at a branchpoint
-      typedef multimap<string,string>::const_iterator ity;
-      pair<ity,ity> range = cvs.branchpoints.equal_range(curr_version);
+      typedef multimap<string, string>::const_iterator ity;
+      pair<ity, ity> range = cvs.branchpoints.equal_range(curr_version);
       if (range.first != cvs.branchpoints.end()
           && range.first->first == curr_version)
         {
@@ -761,10 +761,10 @@ cvs_history::set_filename(string const &
   ui.set_tick_trailer(ss);
   ss.resize(ss.size() - 2);
   // remove Attic/ if present
-  string::size_type last_slash=ss.rfind('/');
-  if (last_slash!=string::npos && last_slash>=5
-        && ss.substr(last_slash-5,6)=="Attic/")
-     ss.erase(last_slash-5,6);
+  string::size_type last_slash = ss.rfind('/');
+  if (last_slash != string::npos && last_slash >= 5
+      && ss.substr(last_slash - 5, 6) == "Attic/")
+    ss.erase(last_slash - 5, 6);
   curr_file = file_path_internal(ss);
   curr_file_interned = path_interner.intern(ss);
 }
@@ -814,9 +814,9 @@ void cvs_history::index_branchpoint_symb
 
           first_entry_components = components;
           first_entry_components[first_entry_components.size() - 2]
-            = first_entry_components[first_entry_components.size() - 1];
+          = first_entry_components[first_entry_components.size() - 1];
           first_entry_components[first_entry_components.size() - 1]
-            = string("1");
+          = string("1");
 
           branchpoint_components = components;
           branchpoint_components.erase(branchpoint_components.end() - 2,
@@ -882,7 +882,7 @@ class
 
 
 class
-cvs_tree_walker
+  cvs_tree_walker
   : public tree_walker
 {
   cvs_history & cvs;
@@ -972,7 +972,7 @@ struct
 // the *latest* beginning time.
 
 struct
-cvs_cluster
+  cvs_cluster
 {
   time_t first_time;
   cvs_author author;
@@ -1005,7 +1005,7 @@ struct
 
 
 struct
-cluster_consumer
+  cluster_consumer
 {
   cvs_history & cvs;
   key_store & keys;
@@ -1055,7 +1055,7 @@ struct
 cluster_ptr;
 
 struct
-cluster_ptr_lt
+  cluster_ptr_lt
 {
   bool operator()(cluster_ptr const & a,
                   cluster_ptr const & b) const
@@ -1350,7 +1350,7 @@ cluster_consumer::store_auxiliary_certs(
        i != p.tags.end(); ++i)
     {
       map<unsigned long, pair<time_t, revision_id> >::const_iterator j
-        = cvs.resolved_tags.find(*i);
+      = cvs.resolved_tags.find(*i);
 
       if (j != cvs.resolved_tags.end())
         {
============================================================
--- src/sanity.cc	17e9e70774f064cfb8ef4e25075a1f493b63cd78
+++ src/sanity.cc	e9f64040e8c42e6d3a88966ac2825e50aaedd910
@@ -83,8 +83,8 @@ struct sanity::impl
   bool already_dumping;
   std::vector<MusingI const *> musings;
 
-  void (*out_of_band_function)(char channel, std::string const& text, void *opaque);
-  void *out_of_band_opaque;
+  void (*out_of_band_function)(char channel, std::string const & text, void * opaque);
+  void * out_of_band_opaque;
 
   impl() :
     verbosity(0), is_debug(false), logbuf(0xffff),
@@ -146,7 +146,7 @@ sanity::initialize(int argc, char ** arg
       av0.erase(av0.size() - 4);
     string::size_type last_slash = av0.find_last_of("/\\");
     if (last_slash != string::npos)
-      av0.erase(0, last_slash+1);
+      av0.erase(0, last_slash + 1);
     imp->real_prog_name = av0;
     prog_name_ptr = &imp->real_prog_name;
   }
@@ -179,8 +179,8 @@ sanity::dump_buffer()
           copy(imp->gasp_dump.begin(), imp->gasp_dump.end(),
                ostream_iterator<char>(out));
           inform_message((FL("wrote debugging log to %s\n"
-                        "if reporting a bug, please include this file")
-                       % imp->filename).str());
+                             "if reporting a bug, please include this file")
+                          % imp->filename).str());
         }
       else
         inform_message((FL("failed to write debugging log to %s")
@@ -250,9 +250,9 @@ sanity::do_format(format_base const & fm
   catch (exception & e)
     {
       inform_error((F("fatal: formatter failed on %s:%d: %s")
-                % file
-                % line
-                % e.what()).str());
+                    % file
+                    % line
+                    % e.what()).str());
       throw;
     }
 }
@@ -262,7 +262,7 @@ sanity::debug_p()
 {
   if (!imp)
     throw std::logic_error("sanity::debug_p called "
-                            "before sanity::initialize");
+                           "before sanity::initialize");
   return imp->is_debug;
 }
 
@@ -376,10 +376,10 @@ sanity::index_failure(char const * vec_e
                       char const * file, int line)
 {
   char const * pattern
-    = N_("%s:%d: index '%s' = %d overflowed vector '%s' with size %d");
+  = N_("%s:%d: index '%s' = %d overflowed vector '%s' with size %d");
   if (!imp)
     throw std::logic_error("sanity::index_failure occured "
-                            "before sanity::initialize");
+                           "before sanity::initialize");
   if (debug_p())
     log(FL(pattern) % file % line % idx_expr % idx % vec_expr % sz,
         file, line);
@@ -391,7 +391,7 @@ void
 // Last gasp dumps
 
 void
-sanity::push_musing(MusingI const *musing)
+sanity::push_musing(MusingI const * musing)
 {
   I(imp);
   if (!imp->already_dumping)
@@ -399,7 +399,7 @@ void
 }
 
 void
-sanity::pop_musing(MusingI const *musing)
+sanity::pop_musing(MusingI const * musing)
 {
   I(imp);
   if (!imp->already_dumping)
@@ -426,7 +426,7 @@ sanity::gasp()
   out << (F("Current work set: %i items") % imp->musings.size())
       << '\n'; // final newline is kept out of the translation
   for (vector<MusingI const *>::const_iterator
-         i = imp->musings.begin(); i != imp->musings.end(); ++i)
+       i = imp->musings.begin(); i != imp->musings.end(); ++i)
     {
       string tmp;
       try
@@ -457,13 +457,13 @@ sanity::gasp()
   imp->already_dumping = false;
 }
 
-void sanity::set_out_of_band_handler(void (*out_of_band_function)(char, std::string const&, void *), void *opaque_data)
+void sanity::set_out_of_band_handler(void (*out_of_band_function)(char, std::string const &, void *), void * opaque_data)
 {
-  imp->out_of_band_function= out_of_band_function;
-  imp->out_of_band_opaque= opaque_data;
+  imp->out_of_band_function = out_of_band_function;
+  imp->out_of_band_opaque = opaque_data;
 }
 
-bool sanity::maybe_write_to_out_of_band_handler(char channel, std::string const& str)
+bool sanity::maybe_write_to_out_of_band_handler(char channel, std::string const & str)
 {
   if (imp->out_of_band_function)
     {
@@ -536,7 +536,7 @@ void MusingBase::gasp_head(string & out)
 {
   out = (boost::format("----- begin '%s' (in %s, at %s:%d)\n")
          % name % func % file % line
-         ).str();
+        ).str();
 }
 
 void MusingBase::gasp_body(const string & objstr, string & out) const
@@ -546,7 +546,7 @@ void MusingBase::gasp_body(const string 
           % objstr
           % (*(objstr.end() - 1) == '\n' ? "" : "\n")
           % name % func % file % line
-          ).str();
+         ).str();
 }
 
 const locale &
@@ -571,7 +571,7 @@ struct
 }
 
 struct
-format_base::impl
+  format_base::impl
 {
   format fmt;
   ostringstream oss;
@@ -611,7 +611,7 @@ format_base::~format_base()
 
 format_base::~format_base()
 {
-        delete pimpl;
+  delete pimpl;
 }
 
 format_base &
@@ -642,12 +642,12 @@ format_base::format_base(char const * pa
 
 format_base::format_base(char const * pattern, bool use_locale)
   : pimpl(use_locale ? new impl(pattern, get_user_locale())
-                     : new impl(pattern))
+          : new impl(pattern))
 {}
 
 format_base::format_base(std::string const & pattern, bool use_locale)
   : pimpl(use_locale ? new impl(pattern, get_user_locale())
-                     : new impl(pattern))
+          : new impl(pattern))
 {}
 
 ostream &
============================================================
--- src/sanity.hh	1f70fd2034c34f0c83a683a59362fb8d1dbac625
+++ src/sanity.hh	f6540f47e37f207dab7225918e638a6b10094a1e
@@ -27,12 +27,14 @@
 // message to make it to the user, not a diagnostic error indicating
 // internal failure but a suggestion that they do something differently.
 
-namespace origin {
+namespace origin
+{
   std::string type_to_string(type t);
 }
 
 // An error that may have had an external source.
-class recoverable_failure : public std::runtime_error {
+class recoverable_failure : public std::runtime_error
+{
   origin::type _caused_by;
 public:
   recoverable_failure(origin::type o, std::string const & s)
@@ -43,7 +45,8 @@ public:
 
 // An error that indicates either an immediate logic bug or
 // a corrupt database. You don't want to catch these.
-class unrecoverable_failure : public std::logic_error {
+class unrecoverable_failure : public std::logic_error
+{
   origin::type _caused_by;
 public:
   unrecoverable_failure(origin::type o, std::string const & s)
@@ -58,7 +61,8 @@ struct i18n_format;
 struct plain_format;
 struct i18n_format;
 
-struct sanity {
+struct sanity
+{
   sanity();
   virtual ~sanity();
   virtual void initialize(int, char **, char const *);
@@ -72,12 +76,12 @@ struct sanity {
 
   // set out of band handler (e.g. for automate stdio)
   void set_out_of_band_handler(void (*out_of_band_function)(char channel,
-                                                            std::string const& text,
-                                                            void *opaque)=NULL,
-                               void *opaque_data=NULL);
+                                                            std::string const & text,
+                                                            void * opaque) = NULL,
+                               void * opaque_data = NULL);
 
   // if such an out of band handler is set, this directly writes to it
-  bool maybe_write_to_out_of_band_handler(char channel, std::string const& str);
+  bool maybe_write_to_out_of_band_handler(char channel, std::string const & str);
 
   // A couple of places need to look at the debug flag to avoid doing
   // expensive logging if it's off.
@@ -94,13 +98,13 @@ struct sanity {
                                 i18n_format const & explain,
                                 char const * file, int line));
   NORETURN(void index_failure(char const * vec_expr,
-                     char const * idx_expr,
-                     unsigned long sz,
-                     unsigned long idx,
-                     char const * file, int line));
+                              char const * idx_expr,
+                              unsigned long sz,
+                              unsigned long idx,
+                              char const * file, int line));
   void gasp();
-  void push_musing(MusingI const *musing);
-  void pop_musing(MusingI const *musing);
+  void push_musing(MusingI const * musing);
+  void pop_musing(MusingI const * musing);
 
   // debugging aid, see DUMP() below
   void print_var(std::string const & value,
@@ -112,10 +116,10 @@ private:
 private:
   std::string do_format(format_base const & fmt,
                         char const * file, int line);
-  virtual void inform_log(std::string const &msg) = 0;
-  virtual void inform_message(std::string const &msg) = 0;
-  virtual void inform_warning(std::string const &msg) = 0;
-  virtual void inform_error(std::string const &msg) = 0;
+  virtual void inform_log(std::string const & msg) = 0;
+  virtual void inform_message(std::string const & msg) = 0;
+  virtual void inform_warning(std::string const & msg) = 0;
+  virtual void inform_error(std::string const & msg) = 0;
 
   struct impl;
   impl * imp;
@@ -132,11 +136,11 @@ class
 // implement a single very small formatter.
 
 class
-format_base
+  format_base
 {
 protected:
   struct impl;
-  impl *pimpl;
+  impl * pimpl;
 
   format_base() : pimpl(NULL) {}
   ~format_base();
@@ -166,7 +170,7 @@ struct
 
 
 struct
-plain_format
+  plain_format
   : public format_base
 {
   plain_format()
@@ -242,7 +246,7 @@ struct
 
 
 struct
-i18n_format
+  i18n_format
   : public format_base
 {
   i18n_format()
@@ -351,7 +355,8 @@ do { \
 #define UNLIKELY(zz) (zz)
 #endif
 
-struct bad_decode {
+struct bad_decode
+{
   bad_decode(i18n_format const & fmt) : what(fmt.str()) {}
   std::string what;
 };
@@ -414,12 +419,14 @@ template <typename T>
 
 // remove_reference is a workaround for C++ defect #106.
 template <typename T>
-struct remove_reference {
+struct remove_reference
+{
   typedef T type;
 };
 
 template <typename T>
-struct remove_reference <T &> {
+struct remove_reference <T &>
+{
   typedef typename remove_reference<T>::type type;
 };
 
@@ -484,7 +491,7 @@ template <typename T> void
 // debugging utility to dump out vars like MM but without requiring a crash
 
 template <typename T> void
-dump(T const & t, char const *var,
+dump(T const & t, char const * var,
      char const * file, int const line, char const * func)
 {
   std::string value;
============================================================
--- src/transforms.cc	cdfb6854ef4992faba21074145f3c8269b845b11
+++ src/transforms.cc	4702233125ffc5c75646d22cab4cda8298d3c3d5
@@ -68,7 +68,7 @@ error_in_transform(Botan::Exception & e,
       // "botan: TYPE: " part...
       string w(e.what());
       string::size_type pos = w.find(':');
-      pos = w.find(':', pos+1);
+      pos = w.find(':', pos + 1);
       w = string(w.begin() + pos + 2, w.end());
 
       // ... downcase the rest of it and replace underscores with spaces.
@@ -122,11 +122,11 @@ template<> string xform<Botan::Hex_Encod
                                             origin::type made_from)
 {
   string out;
-  out.reserve(in.size()<<1);
+  out.reserve(in.size() << 1);
   for (string::const_iterator i = in.begin();
        i != in.end(); ++i)
     {
-      int h = (*i>>4) & 0x0f;
+      int h = (*i >> 4) & 0x0f;
       if (h < 10)
         out.push_back(h + '0');
       else
@@ -144,7 +144,7 @@ template<> string xform<Botan::Hex_Decod
                                             origin::type made_from)
 {
   string out;
-  out.reserve(in.size()>>1);
+  out.reserve(in.size() >> 1);
   bool high(true);
   int o = 0;
   for (string::const_iterator i = in.begin();
@@ -190,7 +190,8 @@ template<> string xform<Botan::Hex_Decod
       high = !high;
     }
   if (!high)
-    { // Hex string wasn't a whole number of bytes
+    {
+      // Hex string wasn't a whole number of bytes
       //I(false); // Drop the last char (!!)
     }
   return out;
============================================================
--- src/transforms.hh	301029c9284881c127a16fc610a8c22eb670cdde
+++ src/transforms.hh	4d6ebfd400a6e83d882f3281fb0f1a70e92f5b61
@@ -17,7 +17,8 @@
 // transforms.cc for the implementations (most of which are delegations to
 // crypto++ and librsync)
 
-namespace Botan {
+namespace Botan
+{
   class Base64_Encoder;
   class Base64_Decoder;
   class Hex_Encoder;
@@ -33,7 +34,8 @@ xform(std::string const & in, origin::ty
 xform(std::string const & in, origin::type made_from)
 {
   enum dummy { d = (sizeof(struct xform_must_be_specialized_for_this_type)
-                    == sizeof(XFM)) };
+                    == sizeof(XFM))
+             };
   return in; // avoid warnings about no return statement
 }
 
@@ -99,7 +101,7 @@ template<typename T> T decode_hexenc_as(
                                  origin::type made_from)
 { return xform<Botan::Hex_Decoder>(in, made_from); }
 template<typename T> T decode_hexenc_as(std::string const & in,
-                                               origin::type made_from)
+                                        origin::type made_from)
 { return T(decode_hexenc(in, made_from), made_from); }
 
 
============================================================
--- src/vocab.cc	4c73af08de6dd30f710896835673361ee019466e
+++ src/vocab.cc	be19f581df388e5cac09c0d0cb97714ce43b34f4
@@ -132,7 +132,7 @@ struct
 // of making the ATOMIC(foo) values constructed within a symbol table
 // scope share string storage.
 struct
-symtab_impl
+  symtab_impl
 {
   typedef hashmap::hash_set<string> hset;
   hset vals;
@@ -190,7 +190,7 @@ template void dump(key_id const & r, str
 template void dump(file_id const & r, string &);
 template void dump(hexenc<id> const & r, string &);
 template void dump(key_id const & r, string &);
-template void dump(rsa_pub_key const&, string &);
+template void dump(rsa_pub_key const &, string &);
 template void dump(roster_data const & d, string &);
 template void dump(roster_delta const & d, string &);
 template void dump(manifest_data const & d, string &);
============================================================
--- src/work.cc	36e5dcda8cf09c9054cb88e6165707112ba9ac03
+++ src/work.cc	8815821392dd36a8fb3e71583615b278e4ca80b2
@@ -487,7 +487,7 @@ read_options_file(any_path const & optsp
         {
           E(val != memory_db_identifier, origin::user,
             F("a memory database '%s' cannot be used in a workspace")
-                % memory_db_identifier);
+            % memory_db_identifier);
 
           if (val.find(':') == 0)
             {
@@ -533,13 +533,12 @@ write_options_file(bookkeeping_path cons
 
   E(opts.dbname_type != memory_db, origin::user,
     F("a memory database '%s' cannot be used in a workspace")
-      % memory_db_identifier);
+    % memory_db_identifier);
 
   // if we have both, alias and full path, prefer the alias
   if (opts.dbname_type == managed_db && !opts.dbname_alias.empty())
     st.push_str_pair(symbol("database"), opts.dbname_alias);
-  else
-  if (opts.dbname_type == unmanaged_db && !opts.dbname.as_internal().empty())
+  else if (opts.dbname_type == unmanaged_db && !opts.dbname.as_internal().empty())
     st.push_str_pair(symbol("database"), opts.dbname.as_internal());
 
   if (!opts.branch().empty())
@@ -582,7 +581,8 @@ workspace::get_options(options & opts)
     }
 
   if (!opts.key_dir_given && !opts.conf_dir_given && cur_opts.key_dir_given)
-    { // if empty/missing, we want to keep the default
+    {
+      // if empty/missing, we want to keep the default
       opts.key_dir = cur_opts.key_dir;
       // one would expect that we should set the key_dir_given flag here, but
       // we do not because of the interaction between --confdir and --keydir.
@@ -620,7 +620,7 @@ workspace::maybe_set_options(options con
 workspace::maybe_set_options(options const & opts, lua_hooks & lua)
 {
   if (workspace::found && workspace::used)
-      set_options(opts, lua, false);
+    set_options(opts, lua, false);
 }
 
 // This function should usually be called at the (successful)
@@ -644,10 +644,10 @@ workspace::set_options(options const & o
   // as is in _MTN/options, not write out an empty option.
   options cur_opts;
   if (file_exists(o_path))
-  {
-    read_options_file(o_path, cur_opts);
-    helper.get_database_path(cur_opts, old_db_path);
-  }
+    {
+      read_options_file(o_path, cur_opts);
+      helper.get_database_path(cur_opts, old_db_path);
+    }
 
   bool options_changed = false;
   if (old_db_path != new_db_path && file_exists(new_db_path))
@@ -737,10 +737,10 @@ namespace syms
 
 namespace syms
 {
-    symbol const start("start");
-    symbol const good("good");
-    symbol const bad("bad");
-    symbol const skipped("skipped");
+  symbol const start("start");
+  symbol const good("good");
+  symbol const bad("bad");
+  symbol const skipped("skipped");
 };
 
 void
@@ -979,317 +979,318 @@ workspace::init_attributes(file_path con
 }
 
 // objects and routines for manipulating the workspace itself
-namespace {
+namespace
+{
 
-struct file_itemizer : public tree_walker
-{
-  database & db;
-  workspace & work;
-  set<file_path> & known;
-  set<file_path> & unknown;
-  set<file_path> & ignored;
-  path_restriction const & mask;
-  file_itemizer(database & db, workspace & work,
-                set<file_path> & k,
-                set<file_path> & u,
-                set<file_path> & i,
-                path_restriction const & r)
-    : db(db), work(work), known(k), unknown(u), ignored(i), mask(r) {}
-  virtual bool visit_dir(file_path const & path);
-  virtual void visit_file(file_path const & path);
-};
+  struct file_itemizer : public tree_walker
+  {
+    database & db;
+    workspace & work;
+    set<file_path> & known;
+    set<file_path> & unknown;
+    set<file_path> & ignored;
+    path_restriction const & mask;
+    file_itemizer(database & db, workspace & work,
+                  set<file_path> & k,
+                  set<file_path> & u,
+                  set<file_path> & i,
+                  path_restriction const & r)
+      : db(db), work(work), known(k), unknown(u), ignored(i), mask(r) {}
+    virtual bool visit_dir(file_path const & path);
+    virtual void visit_file(file_path const & path);
+  };
 
 
-bool
-file_itemizer::visit_dir(file_path const & path)
-{
-  this->visit_file(path);
-  return known.find(path) != known.end();
-}
+  bool
+  file_itemizer::visit_dir(file_path const & path)
+  {
+    this->visit_file(path);
+    return known.find(path) != known.end();
+  }
 
-void
-file_itemizer::visit_file(file_path const & path)
-{
-  if (mask.includes(path) && known.find(path) == known.end())
-    {
-      if (work.ignore_file(path) || db.is_dbfile(path))
-        ignored.insert(path);
-      else
-        unknown.insert(path);
-    }
-}
+  void
+  file_itemizer::visit_file(file_path const & path)
+  {
+    if (mask.includes(path) && known.find(path) == known.end())
+      {
+        if (work.ignore_file(path) || db.is_dbfile(path))
+          ignored.insert(path);
+        else
+          unknown.insert(path);
+      }
+  }
 
 
-struct workspace_itemizer : public tree_walker
-{
-  roster_t & roster;
-  set<file_path> const & known;
-  node_id_source & nis;
+  struct workspace_itemizer : public tree_walker
+  {
+    roster_t & roster;
+    set<file_path> const & known;
+    node_id_source & nis;
 
-  workspace_itemizer(roster_t & roster, set<file_path> const & paths,
-                     node_id_source & nis);
-  virtual bool visit_dir(file_path const & path);
-  virtual void visit_file(file_path const & path);
-};
+    workspace_itemizer(roster_t & roster, set<file_path> const & paths,
+                       node_id_source & nis);
+    virtual bool visit_dir(file_path const & path);
+    virtual void visit_file(file_path const & path);
+  };
 
-workspace_itemizer::workspace_itemizer(roster_t & roster,
-                                       set<file_path> const & paths,
-                                       node_id_source & nis)
+  workspace_itemizer::workspace_itemizer(roster_t & roster,
+                                         set<file_path> const & paths,
+                                         node_id_source & nis)
     : roster(roster), known(paths), nis(nis)
-{
-  node_id root_nid = roster.create_dir_node(nis);
-  roster.attach_node(root_nid, file_path_internal(""));
-}
+  {
+    node_id root_nid = roster.create_dir_node(nis);
+    roster.attach_node(root_nid, file_path_internal(""));
+  }
 
-bool
-workspace_itemizer::visit_dir(file_path const & path)
-{
-  node_id nid = roster.create_dir_node(nis);
-  roster.attach_node(nid, path);
-  return known.find(path) != known.end();
-}
+  bool
+  workspace_itemizer::visit_dir(file_path const & path)
+  {
+    node_id nid = roster.create_dir_node(nis);
+    roster.attach_node(nid, path);
+    return known.find(path) != known.end();
+  }
 
-void
-workspace_itemizer::visit_file(file_path const & path)
-{
-  file_id fid;
-  node_id nid = roster.create_file_node(fid, nis);
-  roster.attach_node(nid, path);
-}
+  void
+  workspace_itemizer::visit_file(file_path const & path)
+  {
+    file_id fid;
+    node_id nid = roster.create_file_node(fid, nis);
+    roster.attach_node(nid, path);
+  }
 
 
-class
-addition_builder
-  : public tree_walker
-{
-  database & db;
-  workspace & work;
-  roster_t & ros;
-  editable_roster_base & er;
-  bool respect_ignore;
-  bool recursive;
-public:
-  addition_builder(database & db, workspace & work,
-                   roster_t & r, editable_roster_base & e,
-                   bool i, bool rec)
-    : db(db), work(work), ros(r), er(e), respect_ignore(i), recursive(rec)
-  {}
-  virtual bool visit_dir(file_path const & path);
-  virtual void visit_file(file_path const & path);
-  void add_nodes_for(file_path const & path, file_path const & goal);
-};
+  class
+    addition_builder
+    : public tree_walker
+  {
+    database & db;
+    workspace & work;
+    roster_t & ros;
+    editable_roster_base & er;
+    bool respect_ignore;
+    bool recursive;
+  public:
+    addition_builder(database & db, workspace & work,
+                     roster_t & r, editable_roster_base & e,
+                     bool i, bool rec)
+      : db(db), work(work), ros(r), er(e), respect_ignore(i), recursive(rec)
+    {}
+    virtual bool visit_dir(file_path const & path);
+    virtual void visit_file(file_path const & path);
+    void add_nodes_for(file_path const & path, file_path const & goal);
+  };
 
-void
-addition_builder::add_nodes_for(file_path const & path,
-                                file_path const & goal)
-{
-  // this check suffices to terminate the recursion; our caller guarantees
-  // that the roster has a root node, which will be a directory.
-  if (ros.has_node(path))
-    {
-      E(is_dir_t(ros.get_node(path)), origin::user,
-        F("cannot add '%s', because '%s' is recorded as a file "
-          "in the workspace manifest") % goal % path);
-      return;
-    }
+  void
+  addition_builder::add_nodes_for(file_path const & path,
+                                  file_path const & goal)
+  {
+    // this check suffices to terminate the recursion; our caller guarantees
+    // that the roster has a root node, which will be a directory.
+    if (ros.has_node(path))
+      {
+        E(is_dir_t(ros.get_node(path)), origin::user,
+          F("cannot add '%s', because '%s' is recorded as a file "
+            "in the workspace manifest") % goal % path);
+        return;
+      }
 
-  add_nodes_for(path.dirname(), goal);
-  P(F("adding '%s' to workspace manifest") % path);
+    add_nodes_for(path.dirname(), goal);
+    P(F("adding '%s' to workspace manifest") % path);
 
-  node_id nid = the_null_node;
-  switch (get_path_status(path))
-    {
-    case path::nonexistent:
-      return;
-    case path::file:
+    node_id nid = the_null_node;
+    switch (get_path_status(path))
       {
+      case path::nonexistent:
+        return;
+      case path::file:
+      {
         file_id ident;
         I(ident_existing_file(path, ident));
         nid = er.create_file_node(ident);
       }
       break;
-    case path::directory:
-      nid = er.create_dir_node();
-      break;
-    }
+      case path::directory:
+        nid = er.create_dir_node();
+        break;
+      }
 
-  I(nid != the_null_node);
-  er.attach_node(nid, path);
+    I(nid != the_null_node);
+    er.attach_node(nid, path);
 
-  work.init_attributes(path, er);
-}
+    work.init_attributes(path, er);
+  }
 
 
-bool
-addition_builder::visit_dir(file_path const & path)
-{
-  struct directory_has_unignored_files_exception {};
-  struct directory_has_unignored_files : public dirent_consumer
+  bool
+  addition_builder::visit_dir(file_path const & path)
   {
-    directory_has_unignored_files(workspace & work, file_path const & p)
-      : work(work), p(p) {}
-    virtual void consume(char const * s)
+    struct directory_has_unignored_files_exception {};
+    struct directory_has_unignored_files : public dirent_consumer
     {
-      try
-        {
-          file_path entry = p / path_component(s);
-          if (!work.ignore_file(entry))
-            throw directory_has_unignored_files_exception();
-        }
-      catch (std::logic_error)
-        {
-          // ignore this file for purposes of the warning; this file
-          // wouldn't have been added by a recursive add anyway.
-        }
-    }
-  private:
-    workspace & work;
-    file_path const & p;
-  };
+      directory_has_unignored_files(workspace & work, file_path const & p)
+        : work(work), p(p) {}
+      virtual void consume(char const * s)
+      {
+        try
+          {
+            file_path entry = p / path_component(s);
+            if (!work.ignore_file(entry))
+              throw directory_has_unignored_files_exception();
+          }
+        catch (std::logic_error)
+          {
+            // ignore this file for purposes of the warning; this file
+            // wouldn't have been added by a recursive add anyway.
+          }
+      }
+    private:
+      workspace & work;
+      file_path const & p;
+    };
 
-  if (!recursive)
-    {
-      bool warn = false;
+    if (!recursive)
+      {
+        bool warn = false;
 
-      // If the db can ever be stored in a dir
-      // then revisit this logic
-      I(!db.is_dbfile(path));
+        // If the db can ever be stored in a dir
+        // then revisit this logic
+        I(!db.is_dbfile(path));
 
-      if (!respect_ignore)
-        warn = !directory_empty(path);
-      else if (!work.ignore_file(path))
-        {
-          directory_has_unignored_files dhuf(work, path);
-          try
-            {
-              read_directory(path, dhuf, dhuf, dhuf);
-            }
-          catch (directory_has_unignored_files_exception)
-            {
-              warn = true;
-            }
-        }
+        if (!respect_ignore)
+          warn = !directory_empty(path);
+        else if (!work.ignore_file(path))
+          {
+            directory_has_unignored_files dhuf(work, path);
+            try
+              {
+                read_directory(path, dhuf, dhuf, dhuf);
+              }
+            catch (directory_has_unignored_files_exception)
+              {
+                warn = true;
+              }
+          }
 
-      if (warn)
-        W(F("non-recursive add: Files in the directory '%s' "
-            "will not be added automatically.") % path);
-    }
+        if (warn)
+          W(F("non-recursive add: Files in the directory '%s' "
+              "will not be added automatically.") % path);
+      }
 
-  this->visit_file(path);
-  return true;
-}
+    this->visit_file(path);
+    return true;
+  }
 
-void
-addition_builder::visit_file(file_path const & path)
-{
-  if ((respect_ignore && work.ignore_file(path)) || db.is_dbfile(path))
-    {
-      P(F("skipping ignorable file '%s'") % path);
-      return;
-    }
+  void
+  addition_builder::visit_file(file_path const & path)
+  {
+    if ((respect_ignore && work.ignore_file(path)) || db.is_dbfile(path))
+      {
+        P(F("skipping ignorable file '%s'") % path);
+        return;
+      }
 
-  if (ros.has_node(path))
-    {
-      if (!path.empty())
-        P(F("skipping '%s', already accounted for in workspace") % path);
-      return;
-    }
+    if (ros.has_node(path))
+      {
+        if (!path.empty())
+          P(F("skipping '%s', already accounted for in workspace") % path);
+        return;
+      }
 
-  I(ros.has_root());
-  add_nodes_for(path, path);
-}
+    I(ros.has_root());
+    add_nodes_for(path, path);
+  }
 
-struct editable_working_tree : public editable_tree
-{
-  editable_working_tree(workspace & work, lua_hooks & lua,
-                        content_merge_adaptor const & source,
-                        bool const messages)
-    : work(work), lua(lua), source(source), next_nid(1),
-      root_dir_attached(true), messages(messages)
-  {};
+  struct editable_working_tree : public editable_tree
+  {
+    editable_working_tree(workspace & work, lua_hooks & lua,
+                          content_merge_adaptor const & source,
+                          bool const messages)
+      : work(work), lua(lua), source(source), next_nid(1),
+        root_dir_attached(true), messages(messages)
+    {};
 
-  virtual node_id detach_node(file_path const & src);
-  virtual void drop_detached_node(node_id nid);
+    virtual node_id detach_node(file_path const & src);
+    virtual void drop_detached_node(node_id nid);
 
-  virtual node_id create_dir_node();
-  virtual node_id create_file_node(file_id const & content);
-  virtual void attach_node(node_id nid, file_path const & dst);
+    virtual node_id create_dir_node();
+    virtual node_id create_file_node(file_id const & content);
+    virtual void attach_node(node_id nid, file_path const & dst);
 
-  virtual void apply_delta(file_path const & pth,
-                           file_id const & old_id,
-                           file_id const & new_id);
-  virtual void clear_attr(file_path const & path,
-                          attr_key const & key);
-  virtual void set_attr(file_path const & path,
-                        attr_key const & key,
-                        attr_value const & val);
+    virtual void apply_delta(file_path const & pth,
+                             file_id const & old_id,
+                             file_id const & new_id);
+    virtual void clear_attr(file_path const & path,
+                            attr_key const & key);
+    virtual void set_attr(file_path const & path,
+                          attr_key const & key,
+                          attr_value const & val);
 
-  virtual void commit();
+    virtual void commit();
 
-  virtual ~editable_working_tree();
-private:
-  workspace & work;
-  lua_hooks & lua;
-  content_merge_adaptor const & source;
-  node_id next_nid;
-  std::map<bookkeeping_path, file_path> rename_add_drop_map;
-  bool root_dir_attached;
-  bool messages;
-};
+    virtual ~editable_working_tree();
+  private:
+    workspace & work;
+    lua_hooks & lua;
+    content_merge_adaptor const & source;
+    node_id next_nid;
+    std::map<bookkeeping_path, file_path> rename_add_drop_map;
+    bool root_dir_attached;
+    bool messages;
+  };
 
 
-struct simulated_working_tree : public editable_tree
-{
-  roster_t & workspace;
-  node_id_source & nis;
+  struct simulated_working_tree : public editable_tree
+  {
+    roster_t & workspace;
+    node_id_source & nis;
 
-  set<file_path> blocked_paths;
-  set<file_path> conflicting_paths;
-  int conflicts;
-  map<node_id, file_path> nid_map;
+    set<file_path> blocked_paths;
+    set<file_path> conflicting_paths;
+    int conflicts;
+    map<node_id, file_path> nid_map;
 
-  simulated_working_tree(roster_t & r, temp_node_id_source & n)
-    : workspace(r), nis(n), conflicts(0) {}
+    simulated_working_tree(roster_t & r, temp_node_id_source & n)
+      : workspace(r), nis(n), conflicts(0) {}
 
-  virtual node_id detach_node(file_path const & src);
-  virtual void drop_detached_node(node_id nid);
+    virtual node_id detach_node(file_path const & src);
+    virtual void drop_detached_node(node_id nid);
 
-  virtual node_id create_dir_node();
-  virtual node_id create_file_node(file_id const & content);
-  virtual void attach_node(node_id nid, file_path const & dst);
+    virtual node_id create_dir_node();
+    virtual node_id create_file_node(file_id const & content);
+    virtual void attach_node(node_id nid, file_path const & dst);
 
-  virtual void apply_delta(file_path const & pth,
-                           file_id const & old_id,
-                           file_id const & new_id);
-  virtual void clear_attr(file_path const & path,
-                          attr_key const & key);
-  virtual void set_attr(file_path const & path,
-                        attr_key const & key,
-                        attr_value const & val);
+    virtual void apply_delta(file_path const & pth,
+                             file_id const & old_id,
+                             file_id const & new_id);
+    virtual void clear_attr(file_path const & path,
+                            attr_key const & key);
+    virtual void set_attr(file_path const & path,
+                          attr_key const & key,
+                          attr_value const & val);
 
-  virtual void commit();
+    virtual void commit();
 
-  virtual bool has_conflicting_paths() const { return conflicting_paths.size() > 0; }
-  virtual set<file_path> get_conflicting_paths() const { return conflicting_paths; }
+    virtual bool has_conflicting_paths() const { return conflicting_paths.size() > 0; }
+    virtual set<file_path> get_conflicting_paths() const { return conflicting_paths; }
 
-  virtual ~simulated_working_tree();
-};
+    virtual ~simulated_working_tree();
+  };
 
 
 // editable_working_tree implementation
 
-static inline bookkeeping_path
-path_for_detached_nids()
-{
-  return bookkeeping_root / "detached";
-}
+  static inline bookkeeping_path
+  path_for_detached_nids()
+  {
+    return bookkeeping_root / "detached";
+  }
 
-static inline bookkeeping_path
-path_for_detached_nid(node_id nid)
-{
-  return path_for_detached_nids() / path_component(lexical_cast<string>(nid),
-                                                   origin::internal);
-}
+  static inline bookkeeping_path
+  path_for_detached_nid(node_id nid)
+  {
+    return path_for_detached_nids() / path_component(lexical_cast<string>(nid),
+                                                     origin::internal);
+  }
 
 // Attaching/detaching the root directory:
 //   This is tricky, because we don't want to simply move it around, like
@@ -1309,274 +1310,274 @@ path_for_detached_nid(node_id nid)
 // this would require that we know our root directory's name relative to its
 // parent.
 
-node_id
-editable_working_tree::detach_node(file_path const & src_pth)
-{
-  I(root_dir_attached);
-  node_id nid = next_nid++;
-  bookkeeping_path dst_pth = path_for_detached_nid(nid);
-  safe_insert(rename_add_drop_map, make_pair(dst_pth, src_pth));
-  if (src_pth == file_path())
-    {
-      // root dir detach, so we move contents, rather than the dir itself
-      mkdir_p(dst_pth);
+  node_id
+  editable_working_tree::detach_node(file_path const & src_pth)
+  {
+    I(root_dir_attached);
+    node_id nid = next_nid++;
+    bookkeeping_path dst_pth = path_for_detached_nid(nid);
+    safe_insert(rename_add_drop_map, make_pair(dst_pth, src_pth));
+    if (src_pth == file_path())
+      {
+        // root dir detach, so we move contents, rather than the dir itself
+        mkdir_p(dst_pth);
 
-      vector<file_path> files, dirs;
-      fill_path_vec<file_path> fill_files(src_pth, files, false);
-      fill_path_vec<file_path> fill_dirs(src_pth, dirs, true);
-      read_directory(src_pth, fill_files, fill_dirs);
+        vector<file_path> files, dirs;
+        fill_path_vec<file_path> fill_files(src_pth, files, false);
+        fill_path_vec<file_path> fill_dirs(src_pth, dirs, true);
+        read_directory(src_pth, fill_files, fill_dirs);
 
-      for (vector<file_path>::const_iterator i = files.begin();
-           i != files.end(); ++i)
-        move_file(*i, dst_pth / (*i).basename());
-      for (vector<file_path>::const_iterator i = dirs.begin();
-           i != dirs.end(); ++i)
-        move_dir(*i, dst_pth / (*i).basename());
+        for (vector<file_path>::const_iterator i = files.begin();
+             i != files.end(); ++i)
+          move_file(*i, dst_pth / (*i).basename());
+        for (vector<file_path>::const_iterator i = dirs.begin();
+             i != dirs.end(); ++i)
+          move_dir(*i, dst_pth / (*i).basename());
 
-      root_dir_attached = false;
-    }
-  else
-    move_path(src_pth, dst_pth);
-  return nid;
-}
+        root_dir_attached = false;
+      }
+    else
+      move_path(src_pth, dst_pth);
+    return nid;
+  }
 
-void
-editable_working_tree::drop_detached_node(node_id nid)
-{
-  bookkeeping_path pth = path_for_detached_nid(nid);
-  map<bookkeeping_path, file_path>::const_iterator i
+  void
+  editable_working_tree::drop_detached_node(node_id nid)
+  {
+    bookkeeping_path pth = path_for_detached_nid(nid);
+    map<bookkeeping_path, file_path>::const_iterator i
     = rename_add_drop_map.find(pth);
-  I(i != rename_add_drop_map.end());
-  P(F("dropping '%s'") % i->second);
-  safe_erase(rename_add_drop_map, pth);
-  delete_file_or_dir_shallow(pth);
-}
+    I(i != rename_add_drop_map.end());
+    P(F("dropping '%s'") % i->second);
+    safe_erase(rename_add_drop_map, pth);
+    delete_file_or_dir_shallow(pth);
+  }
 
-node_id
-editable_working_tree::create_dir_node()
-{
-  node_id nid = next_nid++;
-  bookkeeping_path pth = path_for_detached_nid(nid);
-  require_path_is_nonexistent(pth,
-                              F("path '%s' already exists") % pth);
-  mkdir_p(pth);
-  return nid;
-}
+  node_id
+  editable_working_tree::create_dir_node()
+  {
+    node_id nid = next_nid++;
+    bookkeeping_path pth = path_for_detached_nid(nid);
+    require_path_is_nonexistent(pth,
+                                F("path '%s' already exists") % pth);
+    mkdir_p(pth);
+    return nid;
+  }
 
-node_id
-editable_working_tree::create_file_node(file_id const & content)
-{
-  node_id nid = next_nid++;
-  bookkeeping_path pth = path_for_detached_nid(nid);
-  require_path_is_nonexistent(pth,
-                              F("path '%s' already exists") % pth);
-  file_data dat;
-  source.get_version(content, dat);
-  write_data(pth, dat.inner());
+  node_id
+  editable_working_tree::create_file_node(file_id const & content)
+  {
+    node_id nid = next_nid++;
+    bookkeeping_path pth = path_for_detached_nid(nid);
+    require_path_is_nonexistent(pth,
+                                F("path '%s' already exists") % pth);
+    file_data dat;
+    source.get_version(content, dat);
+    write_data(pth, dat.inner());
 
-  return nid;
-}
+    return nid;
+  }
 
-void
-editable_working_tree::attach_node(node_id nid, file_path const & dst_pth)
-{
-  bookkeeping_path src_pth = path_for_detached_nid(nid);
+  void
+  editable_working_tree::attach_node(node_id nid, file_path const & dst_pth)
+  {
+    bookkeeping_path src_pth = path_for_detached_nid(nid);
 
-  map<bookkeeping_path, file_path>::const_iterator i
+    map<bookkeeping_path, file_path>::const_iterator i
     = rename_add_drop_map.find(src_pth);
-  if (i != rename_add_drop_map.end())
-    {
-      if (messages)
-        P(F("renaming '%s' to '%s'") % i->second % dst_pth);
-      safe_erase(rename_add_drop_map, src_pth);
-    }
-  else if (messages)
-     P(F("adding '%s'") % dst_pth);
+    if (i != rename_add_drop_map.end())
+      {
+        if (messages)
+          P(F("renaming '%s' to '%s'") % i->second % dst_pth);
+        safe_erase(rename_add_drop_map, src_pth);
+      }
+    else if (messages)
+      P(F("adding '%s'") % dst_pth);
 
-  if (dst_pth == file_path())
-    {
-      // root dir attach, so we move contents, rather than the dir itself
-      vector<bookkeeping_path> files, dirs;
-      fill_path_vec<bookkeeping_path> fill_files(src_pth, files, false);
-      fill_path_vec<bookkeeping_path> fill_dirs(src_pth, dirs, true);
-      read_directory(src_pth, fill_files, fill_dirs);
+    if (dst_pth == file_path())
+      {
+        // root dir attach, so we move contents, rather than the dir itself
+        vector<bookkeeping_path> files, dirs;
+        fill_path_vec<bookkeeping_path> fill_files(src_pth, files, false);
+        fill_path_vec<bookkeeping_path> fill_dirs(src_pth, dirs, true);
+        read_directory(src_pth, fill_files, fill_dirs);
 
-      for (vector<bookkeeping_path>::const_iterator i = files.begin();
-           i != files.end(); ++i)
-        move_file(*i, dst_pth / (*i).basename());
-      for (vector<bookkeeping_path>::const_iterator i = dirs.begin();
-           i != dirs.end(); ++i)
-        move_dir(*i, dst_pth / (*i).basename());
+        for (vector<bookkeeping_path>::const_iterator i = files.begin();
+             i != files.end(); ++i)
+          move_file(*i, dst_pth / (*i).basename());
+        for (vector<bookkeeping_path>::const_iterator i = dirs.begin();
+             i != dirs.end(); ++i)
+          move_dir(*i, dst_pth / (*i).basename());
 
-      delete_dir_shallow(src_pth);
-      root_dir_attached = true;
-    }
-  else
-    // This will complain if the move is actually impossible
-    move_path(src_pth, dst_pth);
-}
+        delete_dir_shallow(src_pth);
+        root_dir_attached = true;
+      }
+    else
+      // This will complain if the move is actually impossible
+      move_path(src_pth, dst_pth);
+  }
 
-void
-editable_working_tree::apply_delta(file_path const & pth,
-                                   file_id const & old_id,
-                                   file_id const & new_id)
-{
-  require_path_is_file(pth,
-                       F("file '%s' does not exist") % pth,
-                       F("file '%s' is a directory") % pth);
-  file_id curr_id;
-  calculate_ident(pth, curr_id);
-  E(curr_id == old_id, origin::system,
-    F("content of file '%s' has changed, not overwriting") % pth);
-  P(F("updating '%s'") % pth);
+  void
+  editable_working_tree::apply_delta(file_path const & pth,
+                                     file_id const & old_id,
+                                     file_id const & new_id)
+  {
+    require_path_is_file(pth,
+                         F("file '%s' does not exist") % pth,
+                         F("file '%s' is a directory") % pth);
+    file_id curr_id;
+    calculate_ident(pth, curr_id);
+    E(curr_id == old_id, origin::system,
+      F("content of file '%s' has changed, not overwriting") % pth);
+    P(F("updating '%s'") % pth);
 
-  file_data dat;
-  source.get_version(new_id, dat);
-  write_data(pth, dat.inner());
-}
+    file_data dat;
+    source.get_version(new_id, dat);
+    write_data(pth, dat.inner());
+  }
 
-void
-editable_working_tree::clear_attr(file_path const & path,
-                                  attr_key const & key)
-{
-  L(FL("calling hook to clear attribute %s on %s") % key % path);
-  lua.hook_clear_attribute(key(), path);
-}
+  void
+  editable_working_tree::clear_attr(file_path const & path,
+                                    attr_key const & key)
+  {
+    L(FL("calling hook to clear attribute %s on %s") % key % path);
+    lua.hook_clear_attribute(key(), path);
+  }
 
-void
-editable_working_tree::set_attr(file_path const & path,
-                                attr_key const & key,
-                                attr_value const & value)
-{
-  L(FL("calling hook to set attribute %s on %s to %s") % key % path % value);
-  lua.hook_set_attribute(key(), path, value());
-}
+  void
+  editable_working_tree::set_attr(file_path const & path,
+                                  attr_key const & key,
+                                  attr_value const & value)
+  {
+    L(FL("calling hook to set attribute %s on %s to %s") % key % path % value);
+    lua.hook_set_attribute(key(), path, value());
+  }
 
-void
-editable_working_tree::commit()
-{
-  I(rename_add_drop_map.empty());
-  I(root_dir_attached);
-}
+  void
+  editable_working_tree::commit()
+  {
+    I(rename_add_drop_map.empty());
+    I(root_dir_attached);
+  }
 
-editable_working_tree::~editable_working_tree()
-{
-}
+  editable_working_tree::~editable_working_tree()
+  {
+  }
 
 
-node_id
-simulated_working_tree::detach_node(file_path const & src)
-{
-  node_id nid = workspace.detach_node(src);
-  nid_map.insert(make_pair(nid, src));
-  return nid;
-}
+  node_id
+  simulated_working_tree::detach_node(file_path const & src)
+  {
+    node_id nid = workspace.detach_node(src);
+    nid_map.insert(make_pair(nid, src));
+    return nid;
+  }
 
-void
-simulated_working_tree::drop_detached_node(node_id nid)
-{
-  const_node_t node = workspace.get_node(nid);
-  if (is_dir_t(node))
-    {
-      const_dir_t dir = downcast_to_dir_t(node);
-      if (!dir->children.empty())
-        {
-          map<node_id, file_path>::const_iterator i = nid_map.find(nid);
-          I(i != nid_map.end());
-          W(F("cannot drop non-empty directory '%s'") % i->second);
-          conflicts++;
-          for (dir_map::const_iterator j = dir->children.begin();
-               j != dir->children.end(); ++j)
-            conflicting_paths.insert(i->second / j->first);
-        }
-    }
-}
+  void
+  simulated_working_tree::drop_detached_node(node_id nid)
+  {
+    const_node_t node = workspace.get_node(nid);
+    if (is_dir_t(node))
+      {
+        const_dir_t dir = downcast_to_dir_t(node);
+        if (!dir->children.empty())
+          {
+            map<node_id, file_path>::const_iterator i = nid_map.find(nid);
+            I(i != nid_map.end());
+            W(F("cannot drop non-empty directory '%s'") % i->second);
+            conflicts++;
+            for (dir_map::const_iterator j = dir->children.begin();
+                 j != dir->children.end(); ++j)
+              conflicting_paths.insert(i->second / j->first);
+          }
+      }
+  }
 
-node_id
-simulated_working_tree::create_dir_node()
-{
-  return workspace.create_dir_node(nis);
-}
+  node_id
+  simulated_working_tree::create_dir_node()
+  {
+    return workspace.create_dir_node(nis);
+  }
 
-node_id
-simulated_working_tree::create_file_node(file_id const & content)
-{
-  return workspace.create_file_node(content, nis);
-}
+  node_id
+  simulated_working_tree::create_file_node(file_id const & content)
+  {
+    return workspace.create_file_node(content, nis);
+  }
 
-void
-simulated_working_tree::attach_node(node_id nid, file_path const & dst)
-{
-  // this check is needed for checkout because we're using a roster to
-  // represent paths that *may* block the checkout. however to represent
-  // these we *must* have a root node in the roster which will *always*
-  // block us. so here we check for that case and avoid it.
-  if (dst.empty() && workspace.has_root())
-    return;
+  void
+  simulated_working_tree::attach_node(node_id nid, file_path const & dst)
+  {
+    // this check is needed for checkout because we're using a roster to
+    // represent paths that *may* block the checkout. however to represent
+    // these we *must* have a root node in the roster which will *always*
+    // block us. so here we check for that case and avoid it.
+    if (dst.empty() && workspace.has_root())
+      return;
 
-  if (workspace.has_node(dst))
-    {
-      W(F("attach node %d blocked by unversioned path '%s'") % nid % dst);
-      blocked_paths.insert(dst);
-      conflicting_paths.insert(dst);
-      conflicts++;
-    }
-  else if (dst.empty())
-    {
-      // the parent of the workspace root cannot be in the blocked set
-      // this attach would have been caught above if it were a problem
-      workspace.attach_node(nid, dst);
-    }
-  else
-    {
-      file_path parent = dst.dirname();
+    if (workspace.has_node(dst))
+      {
+        W(F("attach node %d blocked by unversioned path '%s'") % nid % dst);
+        blocked_paths.insert(dst);
+        conflicting_paths.insert(dst);
+        conflicts++;
+      }
+    else if (dst.empty())
+      {
+        // the parent of the workspace root cannot be in the blocked set
+        // this attach would have been caught above if it were a problem
+        workspace.attach_node(nid, dst);
+      }
+    else
+      {
+        file_path parent = dst.dirname();
 
-      if (blocked_paths.find(parent) == blocked_paths.end())
-        workspace.attach_node(nid, dst);
-      else
-        {
-          W(F("attach node %d blocked by blocked parent '%s'")
-            % nid % parent);
-          blocked_paths.insert(dst);
-        }
-    }
-}
+        if (blocked_paths.find(parent) == blocked_paths.end())
+          workspace.attach_node(nid, dst);
+        else
+          {
+            W(F("attach node %d blocked by blocked parent '%s'")
+              % nid % parent);
+            blocked_paths.insert(dst);
+          }
+      }
+  }
 
-void
-simulated_working_tree::apply_delta(file_path const & path,
-                                    file_id const & old_id,
-                                    file_id const & new_id)
-{
-  // this may fail if path is not a file but that will be caught
-  // earlier in update_current_roster_from_filesystem
-}
+  void
+  simulated_working_tree::apply_delta(file_path const & path,
+                                      file_id const & old_id,
+                                      file_id const & new_id)
+  {
+    // this may fail if path is not a file but that will be caught
+    // earlier in update_current_roster_from_filesystem
+  }
 
-void
-simulated_working_tree::clear_attr(file_path const & path,
-                                   attr_key const & key)
-{
-}
+  void
+  simulated_working_tree::clear_attr(file_path const & path,
+                                     attr_key const & key)
+  {
+  }
 
-void
-simulated_working_tree::set_attr(file_path const & path,
-                                 attr_key const & key,
-                                 attr_value const & val)
-{
-}
+  void
+  simulated_working_tree::set_attr(file_path const & path,
+                                   attr_key const & key,
+                                   attr_value const & val)
+  {
+  }
 
-void
-simulated_working_tree::commit()
-{
-  // This used to error out on any conflicts, but now some can be resolved
-  // (by --move-conflicting-paths), so we just warn. The non-resolved
-  // conflicts generate other errors downstream.
-  if (conflicts > 0)
-    F("%d workspace conflicts") % conflicts;
-}
+  void
+  simulated_working_tree::commit()
+  {
+    // This used to error out on any conflicts, but now some can be resolved
+    // (by --move-conflicting-paths), so we just warn. The non-resolved
+    // conflicts generate other errors downstream.
+    if (conflicts > 0)
+      F("%d workspace conflicts") % conflicts;
+  }
 
-simulated_working_tree::~simulated_working_tree()
-{
-}
+  simulated_working_tree::~simulated_working_tree()
+  {
+  }
 
 
 }; // anonymous namespace
@@ -1608,7 +1609,7 @@ move_conflicting_paths_into_bookkeeping(
   mkdir_p(leftover_path);
 
   for (set<file_path>::const_iterator i = leftover_paths.begin();
-        i != leftover_paths.end(); ++i)
+       i != leftover_paths.end(); ++i)
     {
       L(FL("processing %s") % *i);
 
@@ -1776,7 +1777,7 @@ workspace::find_unknown_and_ignored(data
 
   file_itemizer u(db, *this, known, unknown, ignored, mask);
   for (vector<file_path>::const_iterator
-         i = roots.begin(); i != roots.end(); ++i)
+       i = roots.begin(); i != roots.end(); ++i)
     {
       walk_tree(*i, u);
     }
@@ -2130,7 +2131,7 @@ workspace::perform_pivot_root(database &
   {
     file_path current_path_to_put_old = (new_root / put_old);
     file_path current_path_to_put_old_parent
-      = current_path_to_put_old.dirname();
+    = current_path_to_put_old.dirname();
 
     E(old_roster.has_node(current_path_to_put_old_parent), origin::user,
       F("directory '%s' is not versioned or does not exist")
@@ -2219,7 +2220,7 @@ workspace::perform_content_update(roster
   // old versions and doesn't reset attributes (mtn:execute).
 
   for (map<file_path, pair<file_id, file_id> >::const_iterator
-         i = update.deltas_applied.begin(); i != update.deltas_applied.end();
+       i = update.deltas_applied.begin(); i != update.deltas_applied.end();
        ++i)
     {
       const_node_t node = new_roster.get_node(i->first);
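
[Editor's note: the attach_node hunks above record paths blocked by unversioned files and also block any path whose parent directory is already blocked. Below is a minimal stand-alone sketch of that bookkeeping idea, using plain std::set<std::string> instead of monotone's file_path/roster types; every name in it is illustrative, not monotone's API.]

// Illustrative sketch only: mimics the "blocked parent" bookkeeping from
// simulated_working_tree::attach_node using plain strings.
#include <iostream>
#include <set>
#include <string>

static std::string dirname_of(std::string const & p)
{
  std::string::size_type slash = p.rfind('/');
  return slash == std::string::npos ? std::string() : p.substr(0, slash);
}

int main()
{
  std::set<std::string> unversioned = { "docs", "docs/notes.txt" };
  std::set<std::string> blocked;
  int conflicts = 0;

  std::string const targets[] = { "README", "docs", "docs/intro.txt" };
  for (std::string const & dst : targets)
    {
      if (unversioned.count(dst))
        {
          // same idea as the warning "attach node %d blocked by unversioned path"
          std::cout << "blocked by unversioned path '" << dst << "'\n";
          blocked.insert(dst);
          ++conflicts;
        }
      else if (blocked.count(dirname_of(dst)))
        {
          std::cout << "blocked by blocked parent '" << dirname_of(dst) << "'\n";
          blocked.insert(dst);
        }
      else
        std::cout << "attached '" << dst << "'\n";
    }
  std::cout << conflicts << " conflict(s)\n";
  return 0;
}
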
============================================================
--- src/xdelta.cc	5094e62fb1c7881a0b8da6331f2fc7488a342146
+++ src/xdelta.cc	1cf88b0bc098831ed583d2dc60fc4288a08b96b0
@@ -58,7 +58,7 @@ struct
 typedef hashmap::hash_map<u32, extent> match_table;
 
 struct
-insn
+  insn
 {
   insn(char c) : code(insert), pos(0), len(0), payload("")  { payload += c; }
   insn(string s) : code(insert), pos(0), len(s.size()), payload(s)  {}
@@ -115,7 +115,7 @@ find_match(match_table const & matches,
 
   // maybe we haven't seen it at all?
   if (e == matches.end())
-      return false;
+    return false;
 
   string::size_type tpos = e->second.first;
   string::size_type tlen = e->second.second;
@@ -265,7 +265,7 @@ compute_delta_insns(string const & a,
           // it does a multiply, but for now, ignore this; it turns out that
           // advancements in the range of [2..blocksz-1] are actually really
           // rare.
-          if (badvance >= blocksz/2)
+          if (badvance >= blocksz / 2)
             {
               u32 new_lo = save_lo + badvance;
               u32 new_hi = new_lo + blocksz;
@@ -274,7 +274,7 @@ compute_delta_insns(string const & a,
                   new_hi = b.size();
                 }
               I(new_lo <= new_hi);
-              rolling.replace_with(reinterpret_cast<u8 const *>(b.data() + new_lo), new_hi-new_lo);
+              rolling.replace_with(reinterpret_cast<u8 const *>(b.data() + new_lo), new_hi - new_lo);
               lo = new_lo;
             }
         }
@@ -348,7 +348,7 @@ struct
 }
 
 struct
-simple_applicator
+  simple_applicator
   : public delta_applicator
 {
   string src;
@@ -361,7 +361,7 @@ simple_applicator
   }
   virtual void next()
   {
-    swap(src,dst);
+    swap(src, dst);
     dst.clear();
   }
   virtual void finish(string & out)
@@ -380,7 +380,7 @@ inline string::size_type
 };
 
 inline string::size_type
-read_num(string::const_iterator &i,
+read_num(string::const_iterator & i,
          string::const_iterator e)
 {
   string::size_type n = 0;
@@ -416,7 +416,7 @@ apply_delta(shared_ptr<delta_applicator>
           I((i - delta.begin()) + len <= delta.size());
           if (len > 0)
             {
-              string tmp(i, i+len);
+              string tmp(i, i + len);
               da->insert(tmp);
             }
           i += len;
@@ -511,7 +511,7 @@ struct
 typedef vector<chunk> version_spec;
 
 struct
-piece_table
+  piece_table
 {
   vector<string> pieces;
 
@@ -555,7 +555,7 @@ struct
 }
 
 struct
-chunk_less_than
+  chunk_less_than
 {
   bool operator()(chunk const & ch1, chunk const & ch2) const
   {
@@ -596,7 +596,7 @@ apply_copy(version_spec const & in, vers
   if (!out.empty())
     dst_vpos = out.back().vpos + out.back().len;
   version_pos dst_final = dst_vpos + src_len;
-  chunk src_bounding_chunk(0,0,src_vpos,0);
+  chunk src_bounding_chunk(0, 0, src_vpos, 0);
   version_spec::const_iterator lo = lower_bound(in.begin(),
                                                 in.end(),
                                                 src_bounding_chunk,
@@ -646,7 +646,7 @@ struct
 
 
 struct
-piecewise_applicator
+  piecewise_applicator
   : public delta_applicator
 {
   piece_table pt;
@@ -671,7 +671,7 @@ piecewise_applicator
 
   virtual void next()
   {
-    swap(src,dst);
+    swap(src, dst);
     dst->clear();
   }
 
@@ -725,12 +725,12 @@ struct copied_extent
   bool operator<(copied_extent const & other) const
   {
     return (old_pos < other.old_pos) ||
-      (old_pos == other.old_pos && len > other.len);
+           (old_pos == other.old_pos && len > other.len);
   }
 };
 
 struct
-inverse_delta_writing_applicator :
+  inverse_delta_writing_applicator :
   public delta_applicator
 {
   string const & old;
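
[Editor's note: the xdelta.cc hunks above reindent the insn struct and the delta applicators, which apply a stream of copy/insert instructions against a source string. A short sketch of that idea follows; the types and the instruction layout here are simplified stand-ins, not monotone's actual delta encoding.]

// Illustrative sketch: applying a copy/insert instruction stream, in the
// spirit of the insn / simple_applicator types touched above.
#include <iostream>
#include <string>
#include <vector>

struct insn
{
  enum { insert, copy } code;
  std::string::size_type pos, len; // used by copy
  std::string payload;             // used by insert
};

static std::string apply(std::string const & src, std::vector<insn> const & delta)
{
  std::string dst;
  for (insn const & i : delta)
    {
      if (i.code == insn::insert)
        dst += i.payload;                // literal bytes from the delta
      else
        dst += src.substr(i.pos, i.len); // bytes copied out of the source
    }
  return dst;
}

int main()
{
  std::string base = "hello, world";
  std::vector<insn> delta = {
    { insn::copy,   0, 7, ""         },  // "hello, "
    { insn::insert, 0, 0, "monotone" }
  };
  std::cout << apply(base, delta) << "\n"; // prints "hello, monotone"
  return 0;
}
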
============================================================
--- src/ui.cc	8bdaebf0da3e90cbe0026d140c2000e92998693f
+++ src/ui.cc	82328f23df1272f1b6e9e63141d73b4807bf6b2d
@@ -59,16 +59,16 @@ struct user_interface::impl
 
   bool some_tick_is_dirty;    // At least one tick needs being printed
   bool last_write_was_a_tick;
-  map<string,ticker *> tickers;
+  map<string, ticker *> tickers;
   tick_writer * t_writer;
   string tick_trailer;
 
   impl() : some_tick_is_dirty(false), last_write_was_a_tick(false),
-           t_writer(0) {}
+    t_writer(0) {}
 };
 
 ticker::ticker(string const & tickname, string const & s, size_t mod,
-    bool kilocount, bool skip_display) :
+               bool kilocount, bool skip_display) :
   ticks(0),
   mod(mod),
   total(0),
@@ -153,7 +153,7 @@ private:
   void write_ticks();
   void clear_line();
 private:
-  std::map<std::string,size_t> last_ticks;
+  std::map<std::string, size_t> last_ticks;
   unsigned int chars_on_line;
 };
 
@@ -165,7 +165,7 @@ private:
   void write_ticks();
   void clear_line();
 private:
-  std::map<std::string,size_t> last_ticks;
+  std::map<std::string, size_t> last_ticks;
 };
 
 struct tick_write_nothing : virtual public tick_writer
@@ -183,7 +183,7 @@ tick_write_count::~tick_write_count()
 {
 }
 
-static string compose_count(ticker *tick, size_t ticks=0)
+static string compose_count(ticker * tick, size_t ticks = 0)
 {
   string count;
 
@@ -196,7 +196,7 @@ static string compose_count(ticker *tick
     {
       // automatic unit conversion is enabled
       float div = 1.0;
-      const char *message;
+      const char * message;
 
       if (ticks >= 1073741824)
         {
@@ -245,7 +245,7 @@ void tick_write_count::write_ticks()
   vector<string> tick_count_strings;
 
   I(ui.imp);
-  for (map<string,ticker *>::const_iterator i = ui.imp->tickers.begin();
+  for (map<string, ticker *>::const_iterator i = ui.imp->tickers.begin();
        i != ui.imp->tickers.end(); ++i)
     {
       ticker * tick = i->second;
@@ -404,10 +404,10 @@ void tick_write_dot::write_ticks()
       chars_on_line = tickline_prefix.size();
     }
 
-  for (map<string,ticker *>::const_iterator i = ui.imp->tickers.begin();
+  for (map<string, ticker *>::const_iterator i = ui.imp->tickers.begin();
        i != ui.imp->tickers.end(); ++i)
     {
-      map<string,size_t>::const_iterator old = last_ticks.find(i->first);
+      map<string, size_t>::const_iterator old = last_ticks.find(i->first);
 
       if (!ui.imp->last_write_was_a_tick)
         {
@@ -460,11 +460,11 @@ void tick_write_stdio::write_ticks()
   I(ui.imp);
   string headers, sizes, tickline;
 
-  for (map<string,ticker *>::const_iterator i = ui.imp->tickers.begin();
+  for (map<string, ticker *>::const_iterator i = ui.imp->tickers.begin();
        i != ui.imp->tickers.end(); ++i)
     {
       std::map<std::string, size_t>::iterator it =
-            last_ticks.find(i->second->shortname);
+        last_ticks.find(i->second->shortname);
 
       // we output each explanation stanza just once and every time the
       // total count has been changed
@@ -474,8 +474,7 @@ void tick_write_stdio::write_ticks()
           sizes   += i->second->shortname + "=" +  lexical_cast<string>(i->second->total) + ";";
           last_ticks[i->second->shortname] = i->second->total;
         }
-      else
-      if (it->second != i->second->total)
+      else if (it->second != i->second->total)
         {
           sizes   += i->second->shortname + "=" +  lexical_cast<string>(i->second->total) + ";";
           last_ticks[i->second->shortname] = i->second->total;
@@ -503,9 +502,9 @@ void tick_write_stdio::clear_line()
   std::string out;
 
   for (it = last_ticks.begin(); it != last_ticks.end(); it++)
-  {
-    out += it->first + ";";
-  }
+    {
+      out += it->first + ";";
+    }
 
   global_sanity.maybe_write_to_out_of_band_handler('t', out);
   last_ticks.clear();
@@ -746,7 +745,7 @@ user_interface::fatal_exception()
 int
 user_interface::fatal_exception()
 {
-  std::type_info *type = get_current_exception_type();
+  std::type_info * type = get_current_exception_type();
   if (type)
     {
       char const * name = type->name();
@@ -765,37 +764,41 @@ user_interface::output_prefix()
 {
   std::string prefix;
 
-  if (timestamps_enabled) {
-    try {
-      // To prevent possible infinite loops from a spurious log being
-      // made down the line from the call to .as_formatted_localtime,
-      // we temporarly turn off timestamping.  Not by fiddling with
-      // timestamp_enabled, though, since that one might be looked at
-      // by some other code.
-      static int do_timestamp = 0;
+  if (timestamps_enabled)
+    {
+      try
+        {
+          // To prevent possible infinite loops from a spurious log being
+          // made down the line from the call to .as_formatted_localtime,
+          // we temporarly turn off timestamping.  Not by fiddling with
+          // timestamp_enabled, though, since that one might be looked at
+          // by some other code.
+          static int do_timestamp = 0;
 
-      if (++do_timestamp == 1) {
-        // FIXME: with no app pointer around we have no access to
-        // app.lua.get_date_format_spec() here, so we use the same format
-        // which f.e. also Apache uses for its log output
-        prefix = "[" +
-          date_t::now().as_formatted_localtime("%a %b %d %H:%M:%S %Y") +
-          "] ";
-      }
-      --do_timestamp;
+          if (++do_timestamp == 1)
+            {
+              // FIXME: with no app pointer around we have no access to
+              // app.lua.get_date_format_spec() here, so we use the same format
+              // which f.e. also Apache uses for its log output
+              prefix = "[" +
+                       date_t::now().as_formatted_localtime("%a %b %d %H:%M:%S %Y") +
+                       "] ";
+            }
+          --do_timestamp;
+        }
+      // ensure that we do not throw an exception because we could not
+      // create the timestamp prefix above
+      catch (...) {}
     }
-    // ensure that we do not throw an exception because we could not
-    // create the timestamp prefix above
-    catch (...) {}
-  }
 
-  if (prog_name.empty()) {
-    prefix += "?: ";
-  }
+  if (prog_name.empty())
+    {
+      prefix += "?: ";
+    }
   else
-  {
-    prefix += prog_name + ": ";
-  }
+    {
+      prefix += prog_name + ": ";
+    }
 
   return prefix;
 }
@@ -979,7 +982,8 @@ format_text(i18n_format const & text, si
   return format_text(text.str(), col, curcol, indent_first_line);
 }
 
-namespace {
+namespace
+{
   class option_text
   {
     string names;
@@ -999,8 +1003,8 @@ namespace {
           return full_len;
         }
 
-      formatted_names.push_back(names.substr(0, slash-1));
-      formatted_names.push_back(" " + names.substr(slash-1));
+      formatted_names.push_back(names.substr(0, slash - 1));
+      formatted_names.push_back(" " + names.substr(slash - 1));
 
       size_t ret = 0;
       for (vector<string>::const_iterator i = formatted_names.begin();
@@ -1029,10 +1033,10 @@ namespace {
             right = &formatted_desc.at(i);
 
           ret += string(pre_indent, ' ')
-            + *left + string(namelen - left->size(), ' ')
-            + string(space, ' ')
-            + *right
-            + "\n";
+                 + *left + string(namelen - left->size(), ' ')
+                 + string(space, ' ')
+                 + *right
+                 + "\n";
         }
       return ret;
     }
@@ -1113,7 +1117,7 @@ user_interface::inform_usage(usage const
                                           u.which.end()))();
 
   usage_stream << F("Usage: %s [OPTION...] command [ARG...]") %
-    prog_name << "\n\n";
+               prog_name << "\n\n";
 
   if (u.which.empty())
     usage_stream << get_usage_str(options::opts::globals(), opts);
@@ -1125,10 +1129,10 @@ user_interface::inform_usage(usage const
   if (!cmd_options.empty())
     {
       usage_stream
-        << F("Options specific to '%s %s' "
-             "(run '%s help' to see global options):")
-        % prog_name % visibleid % prog_name
-        << "\n\n";
+          << F("Options specific to '%s %s' "
+               "(run '%s help' to see global options):")
+          % prog_name % visibleid % prog_name
+          << "\n\n";
       usage_stream << get_usage_str(cmd_options, opts);
     }
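
[Editor's note: the reindented output_prefix() hunk above guards its timestamp formatting with a static depth counter so that any logging triggered from inside the formatting call cannot recurse forever. A minimal sketch of that reentrancy-guard pattern, with illustrative names only:]

// Minimal sketch of the reentrancy guard used in output_prefix():
// a static depth counter lets only the outermost call do the work.
#include <iostream>
#include <string>

static std::string timestamp_prefix()
{
  static int depth = 0;
  std::string prefix;
  if (++depth == 1)
    {
      // anything called from here that logs again re-enters timestamp_prefix(),
      // sees depth > 1, and simply skips the timestamp instead of recursing
      prefix = "[now] ";
    }
  --depth;
  return prefix;
}

int main()
{
  std::cout << timestamp_prefix() << "message\n";
  return 0;
}
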
 
============================================================
--- src/ui.hh	405d3582ac4c576a9ad0a6f8885eb1ac12be2576
+++ src/ui.hh	380cc7b8d1c4e8c571b6d952497700acf383d35c
@@ -35,7 +35,7 @@ struct ticker
   std::string shortname;
   size_t count_size;
   ticker(std::string const & n, std::string const & s, size_t mod = 64,
-      bool kilocount=false, bool skip_display=false);
+         bool kilocount = false, bool skip_display = false);
   void set_total(size_t tot) { use_total = true; total = tot; }
   void set_count_size(size_t csiz) { count_size = csiz; }
   void operator++();
@@ -66,7 +66,7 @@ public:
   int fatal_exception();
   void set_tick_trailer(std::string const & trailer);
 
-  enum ticker_type { count=1, dot, stdio, none };
+  enum ticker_type { count = 1, dot, stdio, none };
   void set_tick_write_dot();
   void set_tick_write_count();
   void set_tick_write_stdio();
============================================================
--- src/migrate_schema.cc	6795de38c20c625b5f5d955cc4beb26b243dcf57
+++ src/migrate_schema.cc	8f1fa51a198ce29bf1da3043c7f523b8c12197b0
@@ -62,7 +62,7 @@ assert_sqlite3_ok(sqlite3 * db)
   // do this to avoid corrupting sqlite's internal state.)  If it is,
   // rethrow it rather than feeding it to E(), lest we get "error:
   // sqlite error: error: " ugliness.
-  char const *pfx = _("error: ");
+  char const * pfx = _("error: ");
   if (!std::strncmp(errmsg, pfx, strlen(pfx)))
     throw recoverable_failure(origin::database, errmsg);
 
@@ -82,8 +82,8 @@ assert_sqlite3_ok(sqlite3 * db)
     case SQLITE_CANTOPEN:
     case SQLITE_PROTOCOL:
       auxiliary_message
-        = _("make sure database and containing directory are writeable\n"
-            "and you have not run out of disk space");
+      = _("make sure database and containing directory are writeable\n"
+          "and you have not run out of disk space");
       break;
 
       // These error codes may indicate someone is trying to load a database
@@ -92,9 +92,9 @@ assert_sqlite3_ok(sqlite3 * db)
     case SQLITE_CORRUPT:
     case SQLITE_NOTADB:
       auxiliary_message
-        = _("(if this is a database last used by monotone 0.16 or older,\n"
-            "you must follow a special procedure to make it usable again.\n"
-            "see the file UPGRADE, in the distribution, for instructions.)");
+      = _("(if this is a database last used by monotone 0.16 or older,\n"
+          "you must follow a special procedure to make it usable again.\n"
+          "see the file UPGRADE, in the distribution, for instructions.)");
 
     default:
       break;
@@ -110,7 +110,7 @@ namespace
 {
   struct sql
   {
-    sql(sqlite3 * db, int cols, char const *cmd, char const **afterp = 0)
+    sql(sqlite3 * db, int cols, char const * cmd, char const ** afterp = 0)
       : stmt(0), ncols(cols)
     {
       sqlite3_stmt * s;
@@ -258,8 +258,8 @@ static void
 }
 
 static void
-sqlite3_sha1_fn_body(sqlite3_context *f, int nargs, sqlite3_value ** args,
-                    bool strip_whitespace)
+sqlite3_sha1_fn_body(sqlite3_context * f, int nargs, sqlite3_value ** args,
+                     bool strip_whitespace)
 {
   if (nargs <= 1)
     {
@@ -290,7 +290,7 @@ sqlite3_sha1_fn_body(sqlite3_context *f,
             }
           else
             {
-              tmp.append(s, end+1);
+              tmp.append(s, end + 1);
             }
         }
     }
@@ -301,19 +301,19 @@ static void
 }
 
 static void
-sqlite3_sha1_nows_fn(sqlite3_context *f, int nargs, sqlite3_value ** args)
+sqlite3_sha1_nows_fn(sqlite3_context * f, int nargs, sqlite3_value ** args)
 {
   sqlite3_sha1_fn_body(f, nargs, args, true);
 }
 
 static void
-sqlite3_sha1_fn(sqlite3_context *f, int nargs, sqlite3_value ** args)
+sqlite3_sha1_fn(sqlite3_context * f, int nargs, sqlite3_value ** args)
 {
   sqlite3_sha1_fn_body(f, nargs, args, false);
 }
 
 static void
-sqlite3_unbase64_fn(sqlite3_context *f, int nargs, sqlite3_value ** args)
+sqlite3_unbase64_fn(sqlite3_context * f, int nargs, sqlite3_value ** args)
 {
   if (nargs != 1)
     {
@@ -338,7 +338,7 @@ static void
 }
 
 static void
-sqlite3_unhex_fn(sqlite3_context *f, int nargs, sqlite3_value **args)
+sqlite3_unhex_fn(sqlite3_context * f, int nargs, sqlite3_value ** args)
 {
   if (nargs != 1)
     {
@@ -704,14 +704,14 @@ char const migrate_to_binary_hashes[] =
   // table completely.
   "ALTER TABLE revision_certs RENAME TO tmp;\n"
   "CREATE TABLE revision_certs"
-        "  ( hash not null unique,   -- hash of remaining fields separated by \":\"\n"
-        "    id not null,            -- joins with revisions.id\n"
-        "    name not null,          -- opaque string chosen by user\n"
-        "    value not null,         -- opaque blob\n"
-        "    keypair not null,       -- joins with public_keys.id\n"
-        "    signature not null,     -- RSA/SHA1 signature of \"address@hidden:val]\"\n"
-        "    unique(name, value, id, keypair, signature)\n"
-        "  );"
+  "  ( hash not null unique,   -- hash of remaining fields separated by \":\"\n"
+  "    id not null,            -- joins with revisions.id\n"
+  "    name not null,          -- opaque string chosen by user\n"
+  "    value not null,         -- opaque blob\n"
+  "    keypair not null,       -- joins with public_keys.id\n"
+  "    signature not null,     -- RSA/SHA1 signature of \"address@hidden:val]\"\n"
+  "    unique(name, value, id, keypair, signature)\n"
+  "  );"
   "INSERT INTO revision_certs SELECT unhex(hash), unhex(id), name, value, keypair, signature FROM tmp;"
   "DROP TABLE tmp;"
   "CREATE INDEX revision_certs__id ON revision_certs (id);"
@@ -721,10 +721,10 @@ char const migrate_to_binary_hashes[] =
   // schema hash to upgrade to.
   "ALTER TABLE branch_epochs RENAME TO tmp;"
   "CREATE TABLE branch_epochs"
-        "  ( hash not null unique,         -- hash of remaining fields separated by \":\"\n"
-        "    branch not null unique,       -- joins with revision_certs.value\n"
-        "    epoch not null                -- random binary id\n"
-        "  );"
+  "  ( hash not null unique,         -- hash of remaining fields separated by \":\"\n"
+  "    branch not null unique,       -- joins with revision_certs.value\n"
+  "    epoch not null                -- random binary id\n"
+  "  );"
   "INSERT INTO branch_epochs SELECT unhex(hash), branch, unhex(epoch) FROM tmp;"
   "DROP TABLE tmp;"
 
@@ -772,7 +772,8 @@ char const migrate_better_cert_indexing[
   "CREATE INDEX revision_certs__revnameval ON revision_certs (revision_id,\n"
   "       name, value, keypair_id, signature);";
 
-namespace {
+namespace
+{
   struct branch_leaf_finder_info
   {
     std::set<string> parents;
@@ -843,9 +844,9 @@ migrate_add_branch_leaf_cache(sqlite3 * 
             continue;
           string q = string("insert into branch_leaves(branch, revision_id) "
                             "values(X'")
-            + encode_hexenc(*b, origin::internal) + "', X'"
-            + encode_hexenc(rev, origin::internal) + "')";
-            sql::exec(db, q.c_str());
+                     + encode_hexenc(*b, origin::internal) + "', X'"
+                     + encode_hexenc(rev, origin::internal) + "')";
+          sql::exec(db, q.c_str());
         }
       for (std::set<string>::iterator p = my_info.parents.begin();
            p != my_info.parents.end(); ++p)
@@ -869,22 +870,22 @@ char const migrate_add_file_sizes[] =
 }
 
 char const migrate_add_file_sizes[] =
-    "CREATE TABLE file_sizes\n"
-    "        (\n"
-    "        id primary key,     -- joins with files.id or file_deltas.id\n"
-    "        size not null       -- the size of the file in byte\n"
-    "        );";
+  "CREATE TABLE file_sizes\n"
+  "        (\n"
+  "        id primary key,     -- joins with files.id or file_deltas.id\n"
+  "        size not null       -- the size of the file in byte\n"
+  "        );";
 
 
 // these must be listed in order so that ones listed earlier override ones
 // listed later
 enum upgrade_regime
-  {
-    upgrade_changesetify,
-    upgrade_rosterify,
-    upgrade_regen_caches,
-    upgrade_none,
-  };
+{
+  upgrade_changesetify,
+  upgrade_rosterify,
+  upgrade_regen_caches,
+  upgrade_none,
+};
 static void
 dump(enum upgrade_regime const & regime, string & out)
 {
@@ -915,60 +916,97 @@ struct migration_event
 // also add a new migration test for the new schema version.  See
 // tests/schema_migration for details.
 
-const migration_event migration_events[] = {
-  { "edb5fa6cef65bcb7d0c612023d267c3aeaa1e57a",
-    migrate_merge_url_and_group, 0, upgrade_none, regen_none},
+const migration_event migration_events[] =
+{
+  {
+    "edb5fa6cef65bcb7d0c612023d267c3aeaa1e57a",
+    migrate_merge_url_and_group, 0, upgrade_none, regen_none
+  },
 
-  { "f042f3c4d0a4f98f6658cbaf603d376acf88ff4b",
-    migrate_add_hashes_and_merkle_trees, 0, upgrade_none, regen_none },
+  {
+    "f042f3c4d0a4f98f6658cbaf603d376acf88ff4b",
+    migrate_add_hashes_and_merkle_trees, 0, upgrade_none, regen_none
+  },
 
-  { "8929e54f40bf4d3b4aea8b037d2c9263e82abdf4",
-    migrate_to_revisions, 0, upgrade_changesetify, regen_none },
+  {
+    "8929e54f40bf4d3b4aea8b037d2c9263e82abdf4",
+    migrate_to_revisions, 0, upgrade_changesetify, regen_none
+  },
 
-  { "c1e86588e11ad07fa53e5d294edc043ce1d4005a",
-    migrate_to_epochs, 0, upgrade_none, regen_none },
+  {
+    "c1e86588e11ad07fa53e5d294edc043ce1d4005a",
+    migrate_to_epochs, 0, upgrade_none, regen_none
+  },
 
-  { "40369a7bda66463c5785d160819ab6398b9d44f4",
-    migrate_to_vars, 0, upgrade_none, regen_none },
+  {
+    "40369a7bda66463c5785d160819ab6398b9d44f4",
+    migrate_to_vars, 0, upgrade_none, regen_none
+  },
 
-  { "e372b508bea9b991816d1c74680f7ae10d2a6d94",
-    migrate_add_indexes, 0, upgrade_none, regen_none },
+  {
+    "e372b508bea9b991816d1c74680f7ae10d2a6d94",
+    migrate_add_indexes, 0, upgrade_none, regen_none
+  },
 
-  { "1509fd75019aebef5ac3da3a5edf1312393b70e9",
-    0, migrate_to_external_privkeys, upgrade_none, regen_none },
+  {
+    "1509fd75019aebef5ac3da3a5edf1312393b70e9",
+    0, migrate_to_external_privkeys, upgrade_none, regen_none
+  },
 
-  { "bd86f9a90b5d552f0be1fa9aee847ea0f317778b",
-    migrate_add_rosters, 0, upgrade_rosterify, regen_none },
+  {
+    "bd86f9a90b5d552f0be1fa9aee847ea0f317778b",
+    migrate_add_rosters, 0, upgrade_rosterify, regen_none
+  },
 
-  { "1db80c7cee8fa966913db1a463ed50bf1b0e5b0e",
-    migrate_files_BLOB, 0, upgrade_none, regen_none },
+  {
+    "1db80c7cee8fa966913db1a463ed50bf1b0e5b0e",
+    migrate_files_BLOB, 0, upgrade_none, regen_none
+  },
 
-  { "9d2b5d7b86df00c30ac34fe87a3c20f1195bb2df",
-    migrate_rosters_no_hash, 0, upgrade_regen_caches, regen_rosters },
+  {
+    "9d2b5d7b86df00c30ac34fe87a3c20f1195bb2df",
+    migrate_rosters_no_hash, 0, upgrade_regen_caches, regen_rosters
+  },
 
-  { "ae196843d368d042f475e3dadfed11e9d7f9f01e",
-    migrate_add_heights, 0, upgrade_regen_caches, regen_heights },
+  {
+    "ae196843d368d042f475e3dadfed11e9d7f9f01e",
+    migrate_add_heights, 0, upgrade_regen_caches, regen_heights
+  },
 
-  { "48fd5d84f1e5a949ca093e87e5ac558da6e5956d",
-    0, migrate_add_ccode, upgrade_none, regen_none },
+  {
+    "48fd5d84f1e5a949ca093e87e5ac558da6e5956d",
+    0, migrate_add_ccode, upgrade_none, regen_none
+  },
 
-  { "fe48b0804e0048b87b4cea51b3ab338ba187bdc2",
-    migrate_add_heights_index, 0, upgrade_none, regen_none },
+  {
+    "fe48b0804e0048b87b4cea51b3ab338ba187bdc2",
+    migrate_add_heights_index, 0, upgrade_none, regen_none
+  },
 
-  { "7ca81b45279403419581d7fde31ed888a80bd34e",
-    migrate_to_binary_hashes, 0, upgrade_none, regen_none },
+  {
+    "7ca81b45279403419581d7fde31ed888a80bd34e",
+    migrate_to_binary_hashes, 0, upgrade_none, regen_none
+  },
 
-  { "212dd25a23bfd7bfe030ab910e9d62aa66aa2955",
-    migrate_certs_to_key_hash, 0, upgrade_none, regen_none },
+  {
+    "212dd25a23bfd7bfe030ab910e9d62aa66aa2955",
+    migrate_certs_to_key_hash, 0, upgrade_none, regen_none
+  },
 
-  { "9c8d5a9ea8e29c69be6459300982a68321b0ec12",
-    0, migrate_add_branch_leaf_cache, upgrade_none, regen_branches },
+  {
+    "9c8d5a9ea8e29c69be6459300982a68321b0ec12",
+    0, migrate_add_branch_leaf_cache, upgrade_none, regen_branches
+  },
 
-  { "0c956abae3e52522e4e0b7c5cbe7868f5047153e",
-    migrate_add_file_sizes, 0, upgrade_regen_caches, regen_file_sizes },
+  {
+    "0c956abae3e52522e4e0b7c5cbe7868f5047153e",
+    migrate_add_file_sizes, 0, upgrade_regen_caches, regen_file_sizes
+  },
 
-  { "1f60cec1b0f6c8c095dc6d0ffeff2bd0af971ce1",
-    migrate_better_cert_indexing, 0, upgrade_none, regen_none },
+  {
+    "1f60cec1b0f6c8c095dc6d0ffeff2bd0af971ce1",
+    migrate_better_cert_indexing, 0, upgrade_none, regen_none
+  },
 
   // The last entry in this table should always be the current
   // schema ID, with 0 for the migrators.
@@ -1061,7 +1099,7 @@ find_migration(sqlite3 * db)
   string id;
   calculate_schema_id(db, id);
 
-  for (migration_event const *m = migration_events + n_migration_events - 1;
+  for (migration_event const * m = migration_events + n_migration_events - 1;
        m >= migration_events; m--)
     if (m->id == id)
       return m;
@@ -1072,13 +1110,13 @@ enum schema_mismatch_case
 // This enumerates the possible mismatches between the monotone executable
 // and its database.
 enum schema_mismatch_case
-  {
-    SCHEMA_MATCHES = 0,
-    SCHEMA_MIGRATION_NEEDED,
-    SCHEMA_TOO_NEW,
-    SCHEMA_NOT_MONOTONE,
-    SCHEMA_EMPTY
-  };
+{
+  SCHEMA_MATCHES = 0,
+  SCHEMA_MIGRATION_NEEDED,
+  SCHEMA_TOO_NEW,
+  SCHEMA_NOT_MONOTONE,
+  SCHEMA_EMPTY
+};
 static void dump(schema_mismatch_case const & cat, std::string & out)
 {
   switch (cat)
@@ -1193,7 +1231,7 @@ check_sql_schema(sqlite3 * db, system_pa
 
 #ifdef SUPPORT_SQLITE_BEFORE_3003014
 // import the hex function for old sqlite libraries from database.cc
-void sqlite3_hex_fn(sqlite3_context *f, int nargs, sqlite3_value **args);
+void sqlite3_hex_fn(sqlite3_context * f, int nargs, sqlite3_value ** args);
 #endif
 
 
@@ -1217,7 +1255,7 @@ migrate_sql_schema(sqlite3 * db, key_sto
 
     P(F("calculating migration..."));
 
-    migration_event const *m; MM(m);
+    migration_event const * m; MM(m);
     schema_mismatch_case cat; MM(cat);
     m = find_migration(db);
     cat = classify_schema(db, m);
@@ -1284,12 +1322,12 @@ migrate_sql_schema(sqlite3 * db, key_sto
     {
     case upgrade_changesetify:
     case upgrade_rosterify:
-      {
-        string command_str = (regime == upgrade_changesetify
-                              ? "changesetify" : "rosterify");
-        return migration_status(regen_none, command_str);
-      }
-      break;
+    {
+      string command_str = (regime == upgrade_changesetify
+                            ? "changesetify" : "rosterify");
+      return migration_status(regen_none, command_str);
+    }
+    break;
     case upgrade_regen_caches:
       I(regen_type != regen_none);
       return migration_status(regen_type);
@@ -1326,7 +1364,7 @@ test_migration_step(sqlite3 * db, key_st
 
   transaction guard(db);
 
-  migration_event const *m;
+  migration_event const * m;
   for (m = migration_events + n_migration_events - 1;
        m >= migration_events; m--)
     if (schema == m->id)
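
[Editor's note: the migrate_schema.cc hunks above reformat the migration_events table and find_migration(), which locates the current schema id in an oldest-to-newest table and then runs every later migrator in order. The sketch below shows that lookup pattern with made-up schema ids and migrator bodies; it is not the real table.]

// Illustrative sketch of the migration-table lookup pattern behind
// find_migration(): scan newest-first for the current id, then apply
// every later migrator in sequence.
#include <iostream>
#include <string>

typedef void (*migrator)();

static void step_a() { std::cout << "migrate: a -> b\n"; }
static void step_b() { std::cout << "migrate: b -> c\n"; }

struct migration_event { char const * id; migrator fn; };

static const migration_event events[] = {
  { "schema-a", step_a },
  { "schema-b", step_b },
  { "schema-c", 0 },      // newest schema: nothing left to run
};
static const size_t n_events = sizeof(events) / sizeof(events[0]);

int main()
{
  std::string current = "schema-a";
  size_t start = n_events;
  for (size_t i = n_events; i-- > 0; )   // newest-first, like find_migration
    if (current == events[i].id)
      { start = i; break; }

  if (start == n_events)
    { std::cout << "unknown schema\n"; return 1; }

  for (size_t i = start; i < n_events; ++i)
    if (events[i].fn)
      events[i].fn();
  return 0;
}
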
============================================================
--- src/migration.hh	df4687ee1932044712b9b44386dcf3c7ff3c4f22
+++ src/migration.hh	3b9ed0000610e84ed5f33590c0cf4dfb56d358f7
@@ -32,9 +32,11 @@ enum regen_cache_type { regen_none = 0, 
 // value of the "catch all" item "regen_all"
 enum regen_cache_type { regen_none = 0, regen_rosters = 1,
                         regen_heights = 2, regen_branches = 4,
-                        regen_file_sizes = 8, regen_all = 15 };
+                        regen_file_sizes = 8, regen_all = 15
+                      };
 
-class migration_status {
+class migration_status
+{
   regen_cache_type _regen_type;
   std::string _flag_day_name;
 public:
@@ -68,7 +70,7 @@ void test_migration_step(sqlite3 * db, k
 // to change.  we call it a creator code because it has the same format and
 // function as file creator codes in old-sk00l Mac OS.
 
-const unsigned int mtn_creator_code = ((('_'*256 + 'M')*256 + 'T')*256 + 'N');
+const unsigned int mtn_creator_code = ((('_' * 256 + 'M') * 256 + 'T') * 256 + 'N');
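
[Editor's note: the reformatted constant above packs the four characters '_' 'M' 'T' 'N' into one unsigned int, Mac-creator-code style; each multiply by 256 shifts the accumulated value one byte to the left. A quick check of that arithmetic:]

// Quick check of the byte-packing arithmetic behind mtn_creator_code.
#include <cstdio>

int main()
{
  const unsigned int code = ((('_' * 256 + 'M') * 256 + 'T') * 256 + 'N');
  std::printf("0x%08X\n", code);   // prints 0x5F4D544E on an ASCII platform
  return 0;
}
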
 
 
 
============================================================
--- src/constants.cc	b0f021f8b9a3f968cf942929122b02f09d4fda6f
+++ src/constants.cc	e06b82f6f7fd5a37367893418df17efc65b18ebc
@@ -27,41 +27,41 @@ namespace constants
   // base64-encoded data.  note that botan doesn't count \v or \f as
   // whitespace (unlike <ctype.h>) and so neither do we.
   char const legal_base64_bytes[] =
-  // base64 data characters
-  "abcdefghijklmnopqrstuvwxyz"
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-  "0123456789+/="
-  // whitespace
-  " \r\n\t"
-  ;
+    // base64 data characters
+    "abcdefghijklmnopqrstuvwxyz"
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    "0123456789+/="
+    // whitespace
+    " \r\n\t"
+    ;
 
   // all the ASCII characters (bytes) which are legal in a SHA1 hex id
   char const legal_id_bytes[] =
-  "0123456789abcdef"
-  ;
+    "0123456789abcdef"
+    ;
 
   // all the ASCII characters (bytes) which can occur in cert names
   char const legal_cert_name_bytes[] =
-  // LDH characters
-  "abcdefghijklmnopqrstuvwxyz"
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-  "0123456789"
-  "-"
-  ;
+    // LDH characters
+    "abcdefghijklmnopqrstuvwxyz"
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    "0123456789"
+    "-"
+    ;
 
   // all the ASCII characters (bytes) which can occur in key names
   char const legal_key_name_bytes[] =
-  // LDH characters
-  "abcdefghijklmnopqrstuvwxyz"
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-  "0123456789"
-  "-"
-  // label and component separators
-  ".@"
-  // other non-shell, non-selector metacharacters allowed in (unquoted) local
-  // parts by RFC2821/RFC2822.  The full list is !#$%&'*+-/=?^_`|{}~.
-  "+_"
-  ;
+    // LDH characters
+    "abcdefghijklmnopqrstuvwxyz"
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    "0123456789"
+    "-"
+    // label and component separators
+    ".@"
+    // other non-shell, non-selector metacharacters allowed in (unquoted) local
+    // parts by RFC2821/RFC2822.  The full list is !#$%&'*+-/=?^_`|{}~.
+    "+_"
+    ;
 
   // merkle tree / netcmd / netsync related stuff
   char const netsync_key_initializer[netsync_session_key_length_in_bytes]
============================================================
--- src/netsync.cc	55f9aef4302b12a06baef3ae0088fb07431e226f
+++ src/netsync.cc	2fda9e3fb1c251ef804755f51c63e407e011ee40
@@ -76,14 +76,14 @@ build_stream_to_server(options & opts, l
       string cmd = args[0];
       args.erase(args.begin());
       return shared_ptr<Netxx::StreamBase>
-        (new Netxx::PipeStream(cmd, args));
+             (new Netxx::PipeStream(cmd, args));
     }
   else
     {
 #ifdef USE_IPV6
-      bool use_ipv6=true;
+      bool use_ipv6 = true;
 #else
-      bool use_ipv6=false;
+      bool use_ipv6 = false;
 #endif
       string host(info->client.get_uri().host);
       I(!host.empty());
@@ -91,7 +91,7 @@ build_stream_to_server(options & opts, l
                           info->client.get_port(),
                           use_ipv6);
       return shared_ptr<Netxx::StreamBase>
-        (new Netxx::Stream(addr, timeout));
+             (new Netxx::Stream(addr, timeout));
     }
 }
 
@@ -107,14 +107,14 @@ call_server(app_state & app,
   transaction_guard guard(project.db);
 
   Netxx::Timeout timeout(static_cast<long>(constants::netsync_timeout_seconds)),
-    instant(0,1);
+        instant(0, 1);
 
   P(F("connecting to '%s'") % info->client.get_uri().resource());
   P(F("  include pattern  '%s'") % info->client.get_include_pattern());
   P(F("  exclude pattern  '%s'") % info->client.get_exclude_pattern());
 
   shared_ptr<Netxx::StreamBase> server
-    = build_stream_to_server(app.opts, app.lua, info, timeout);
+  = build_stream_to_server(app.opts, app.lua, info, timeout);
 
   // 'false' here means not to revert changes when the SockOpt
   // goes out of scope.
@@ -218,8 +218,8 @@ session_from_server_sync_item(app_state 
     {
       P(F("connecting to '%s'") % info->client.get_uri().resource());
       shared_ptr<Netxx::StreamBase> server
-        = build_stream_to_server(app.opts, app.lua, info,
-                                 Netxx::Timeout(constants::netsync_timeout_seconds));
+      = build_stream_to_server(app.opts, app.lua, info,
+                               Netxx::Timeout(constants::netsync_timeout_seconds));
 
       // 'false' here means not to revert changes when
       // the SockOpt goes out of scope.
@@ -227,17 +227,17 @@ session_from_server_sync_item(app_state 
       socket_options.set_non_blocking();
 
       shared_ptr<session>
-        sess(new session(app, project, keys,
-                         client_voice,
-                         info->client.get_uri().resource(), server));
+      sess(new session(app, project, keys,
+                       client_voice,
+                       info->client.get_uri().resource(), server));
       shared_ptr<wrapped_session>
-        wrapped(new netsync_session(sess.get(),
-                                    app.opts, app.lua, project,
-                                    keys, request.role,
-                                    info->client.get_include_pattern(),
-                                    info->client.get_exclude_pattern(),
-                                    connection_counts::create(),
-                                    true));
+      wrapped(new netsync_session(sess.get(),
+                                  app.opts, app.lua, project,
+                                  keys, request.role,
+                                  info->client.get_include_pattern(),
+                                  info->client.get_exclude_pattern(),
+                                  connection_counts::create(),
+                                  true));
       sess->set_inner(wrapped);
       return sess;
     }
@@ -277,9 +277,9 @@ serve_connections(app_state & app,
                   std::vector<utf8> const & addresses)
 {
 #ifdef USE_IPV6
-  bool use_ipv6=true;
+  bool use_ipv6 = true;
 #else
-  bool use_ipv6=false;
+  bool use_ipv6 = false;
 #endif
 
   shared_ptr<transaction_guard> guard(new transaction_guard(project.db));
@@ -296,7 +296,7 @@ serve_connections(app_state & app,
     {
       if (!guard)
         guard = shared_ptr<transaction_guard>
-          (new transaction_guard(project.db));
+                (new transaction_guard(project.db));
       I(guard);
 
       react.ready(*guard);
@@ -304,11 +304,11 @@ serve_connections(app_state & app,
       while (!server_initiated_sync_requests.empty())
         {
           server_initiated_sync_request request
-            = server_initiated_sync_requests.front();
+          = server_initiated_sync_requests.front();
           server_initiated_sync_requests.pop_front();
           shared_ptr<session> sess
-            = session_from_server_sync_item(app,  project, keys,
-                                            request);
+          = session_from_server_sync_item(app,  project, keys,
+                                          request);
 
           if (sess)
             {
@@ -394,12 +394,12 @@ run_netsync_protocol(app_state & app,
         {
           if (opts.bind_stdio)
             {
-              shared_ptr<Netxx::PipeStream> str(new Netxx::PipeStream(0,1));
+              shared_ptr<Netxx::PipeStream> str(new Netxx::PipeStream(0, 1));
 
               shared_ptr<session>
-                sess(new session(app, project, keys,
-                                 server_voice,
-                                 "stdio", str));
+              sess(new session(app, project, keys,
+                               server_voice,
+                               "stdio", str));
               serve_single_connection(project, sess);
             }
           else
============================================================
--- src/merkle_tree.cc	681a6488e15edc8495664d188928fe122caa8bb0
+++ src/merkle_tree.cc	1a6222f82bb1812d451479e12a8fa552aaef1550
@@ -83,10 +83,10 @@ merkle_node::merkle_node() : level(0), p
 
 
 merkle_node::merkle_node() : level(0), pref(0),
-                             total_num_leaves(0),
-                             bitmap(constants::merkle_bitmap_length_in_bits),
-                             slots(constants::merkle_num_slots),
-                             type(revision_item)
+  total_num_leaves(0),
+  bitmap(constants::merkle_bitmap_length_in_bits),
+  slots(constants::merkle_num_slots),
+  type(revision_item)
 {}
 
 bool
@@ -174,7 +174,7 @@ merkle_node::get_slot_state(size_t n) co
 {
   check_invariants();
   I(n < constants::merkle_num_slots);
-  I(2*n + 1 < bitmap.size());
+  I(2 * n + 1 < bitmap.size());
   if (bitmap[2*n])
     {
       if (bitmap[2*n+1])
@@ -193,13 +193,13 @@ merkle_node::set_slot_state(size_t n, sl
 {
   check_invariants();
   I(n < constants::merkle_num_slots);
-  I(2*n + 1 < bitmap.size());
-  bitmap.reset(2*n);
-  bitmap.reset(2*n+1);
+  I(2 * n + 1 < bitmap.size());
+  bitmap.reset(2 * n);
+  bitmap.reset(2 * n + 1);
   if (st == subtree_state || st == leaf_state)
-    bitmap.set(2*n);
+    bitmap.set(2 * n);
   if (st == subtree_state)
-    bitmap.set(2*n+1);
+    bitmap.set(2 * n + 1);
 }
 
 
@@ -270,8 +270,8 @@ read_node(string const & inbuf, size_t &
 
   if (out.level >= constants::merkle_num_tree_levels)
     throw bad_decode(F("node level is %d, exceeds maximum %d")
-                     % widen<u32,u8>(out.level)
-                     % widen<u32,u8>(constants::merkle_num_tree_levels));
+                     % widen<u32, u8>(out.level)
+                     % widen<u32, u8>(constants::merkle_num_tree_levels));
 
   size_t prefixsz = prefix_length_in_bytes(out.level);
   require_bytes(inbuf, pos, prefixsz, "node prefix");
@@ -366,7 +366,7 @@ recalculate_merkle_codes(merkle_table & 
             {
               prefix extended;
               node->extended_raw_prefix(slotnum, extended);
-              slotval = recalculate_merkle_codes(tab, extended, level+1);
+              slotval = recalculate_merkle_codes(tab, extended, level + 1);
               node->set_raw_slot(slotnum, slotval);
             }
         }
@@ -402,7 +402,7 @@ collect_items_in_subtree(merkle_table & 
 
             case subtree_state:
               node->extended_raw_prefix(slot, ext);
-              collect_items_in_subtree(tab, ext, level+1, items);
+              collect_items_in_subtree(tab, ext, level + 1, items);
               break;
             }
         }
@@ -472,23 +472,23 @@ insert_into_merkle_tree(merkle_table & t
       switch (st)
         {
         case leaf_state:
-          {
-            id slotval;
-            node->get_raw_slot(slotnum, slotval);
-            if (slotval == leaf)
-              {
-                // Do nothing, it's already present
-              }
-            else
-              {
-                insert_into_merkle_tree(tab, type, slotval, level+1);
-                insert_into_merkle_tree(tab, type, leaf, level+1);
-                id empty_subtree_hash;
-                node->set_raw_slot(slotnum, empty_subtree_hash);
-                node->set_slot_state(slotnum, subtree_state);
-              }
-          }
-          break;
+        {
+          id slotval;
+          node->get_raw_slot(slotnum, slotval);
+          if (slotval == leaf)
+            {
+              // Do nothing, it's already present
+            }
+          else
+            {
+              insert_into_merkle_tree(tab, type, slotval, level + 1);
+              insert_into_merkle_tree(tab, type, leaf, level + 1);
+              id empty_subtree_hash;
+              node->set_raw_slot(slotnum, empty_subtree_hash);
+              node->set_slot_state(slotnum, subtree_state);
+            }
+        }
+        break;
 
         case empty_state:
           node->total_num_leaves++;
@@ -497,14 +497,14 @@ insert_into_merkle_tree(merkle_table & t
           break;
 
         case subtree_state:
-          {
-            insert_into_merkle_tree(tab, type, leaf, level+1);
-            id empty_subtree_hash;
-            node->set_raw_slot(slotnum, empty_subtree_hash);
-            node->set_slot_state(slotnum, subtree_state);
-          }
-          break;
+        {
+          insert_into_merkle_tree(tab, type, leaf, level + 1);
+          id empty_subtree_hash;
+          node->set_raw_slot(slotnum, empty_subtree_hash);
+          node->set_slot_state(slotnum, subtree_state);
         }
+        break;
+        }
     }
   else
     {
============================================================
--- src/merkle_tree.hh	7ae6a892d28c901e06986c03c90b54a9acf5102d
+++ src/merkle_tree.hh	2ecb86860e435eaca18ca83de9b061cd05b86626
@@ -39,23 +39,23 @@ typedef enum
 // refiner.{cc,hh} for more details.
 
 typedef enum
-  {
-    file_item = 2,
-    key_item = 3,
-    revision_item = 4,
-    cert_item = 5,
-    epoch_item = 6
-  }
+{
+  file_item = 2,
+  key_item = 3,
+  revision_item = 4,
+  cert_item = 5,
+  epoch_item = 6
+}
 netcmd_item_type;
 
 void netcmd_item_type_to_string(netcmd_item_type t, std::string & typestr);
 
 typedef enum
-  {
-    empty_state,
-    leaf_state,
-    subtree_state
-  }
+{
+  empty_state,
+  leaf_state,
+  subtree_state
+}
 slot_state;
 
 struct merkle_node
@@ -87,15 +87,16 @@ typedef boost::shared_ptr<merkle_node> m
 
 typedef boost::shared_ptr<merkle_node> merkle_ptr;
 
-typedef std::pair<prefix,size_t> merkle_node_id;
-namespace hashmap {
+typedef std::pair<prefix, size_t> merkle_node_id;
+namespace hashmap
+{
   template<>
   struct hash<merkle_node_id>
   {
     hash<std::string> sh;
     size_t operator()(merkle_node_id const & m) const
     {
-     return sh(m.first()) + m.second;
+      return sh(m.first()) + m.second;
     }
   };
 }
============================================================
--- src/netcmd.cc	7adb348671629350233209183dab3332410f91d0
+++ src/netcmd.cc	d49554a3bf89dee69513892d64633132445b4572
@@ -58,17 +58,17 @@ size_t netcmd::encoded_size() const
   string tmp;
   insert_datum_uleb128<size_t>(payload.size(), tmp);
   return 1 // netsync version
-       + 1 // command code
-       + tmp.size() + payload.size() // payload as vstring
-       + constants::netsync_hmac_value_length_in_bytes; // hmac
+         + 1 // command code
+         + tmp.size() + payload.size() // payload as vstring
+         + constants::netsync_hmac_value_length_in_bytes; // hmac
 }
 
 bool
 netcmd::operator==(netcmd const & other) const
 {
   return version == other.version &&
-    cmd_code == other.cmd_code &&
-    payload == other.payload;
+         cmd_code == other.cmd_code &&
+         payload == other.payload;
 }
 
 // note: usher_reply_cmd does not get included in the hmac.
@@ -129,7 +129,7 @@ netcmd::read(u8 min_version, u8 max_vers
       // error immediately after this switch.
       if (!too_old && !too_new)
         throw bad_decode(F("unknown netcmd code 0x%x")
-                          % widen<u32,u8>(cmd_byte));
+                         % widen<u32, u8>(cmd_byte));
     }
   // check that the version is reasonable
   if (cmd_code != usher_cmd)
@@ -138,10 +138,10 @@ netcmd::read(u8 min_version, u8 max_vers
         {
           throw bad_decode(F("protocol version mismatch: wanted between '%d' and '%d' got '%d' (netcmd code %d)\n"
                              "%s")
-                           % widen<u32,u8>(min_version)
-                           % widen<u32,u8>(max_version)
-                           % widen<u32,u8>(extracted_ver)
-                           % widen<u32,u8>(cmd_code)
+                           % widen<u32, u8>(min_version)
+                           % widen<u32, u8>(max_version)
+                           % widen<u32, u8>(extracted_ver)
+                           % widen<u32, u8>(cmd_code)
                            % ((max_version < extracted_ver)
                               ? _("the remote side has a newer, incompatible version of monotone")
                               : _("the remote side has an older, incompatible version of monotone")));
@@ -152,8 +152,8 @@ netcmd::read(u8 min_version, u8 max_vers
   // check to see if we have even enough bytes for a complete uleb128
   size_t payload_len = 0;
   if (!try_extract_datum_uleb128<size_t>(inbuf, pos, "netcmd payload length",
-      payload_len))
-      return false;
+                                         payload_len))
+    return false;
 
   // they might have given us a bogus size
   if (payload_len > constants::netcmd_payload_limit)
@@ -298,7 +298,7 @@ netcmd::read_anonymous_cmd(protocol_role
   if (role_byte != static_cast<u8>(source_role)
       && role_byte != static_cast<u8>(sink_role)
       && role_byte != static_cast<u8>(source_and_sink_role))
-    throw bad_decode(F("unknown role specifier %d") % widen<u32,u8>(role_byte));
+    throw bad_decode(F("unknown role specifier %d") % widen<u32, u8>(role_byte));
   role = static_cast<protocol_role>(role_byte);
   string pattern_string;
   extract_variable_length_string(payload, pattern_string, pos,
@@ -344,7 +344,7 @@ netcmd::read_auth_cmd(protocol_role & ro
   if (role_byte != static_cast<u8>(source_role)
       && role_byte != static_cast<u8>(sink_role)
       && role_byte != static_cast<u8>(source_and_sink_role))
-    throw bad_decode(F("unknown role specifier %d") % widen<u32,u8>(role_byte));
+    throw bad_decode(F("unknown role specifier %d") % widen<u32, u8>(role_byte));
   role = static_cast<protocol_role>(role_byte);
   string pattern_string;
   extract_variable_length_string(payload, pattern_string, pos,
@@ -413,9 +413,9 @@ netcmd::read_refine_cmd(refinement_type 
   // syntax is: <u8: refinement type> <node: a merkle tree node>
   size_t pos = 0;
   ty = static_cast<refinement_type>
-    (extract_datum_lsb<u8>
-     (payload, pos,
-      "refine netcmd, refinement type"));
+       (extract_datum_lsb<u8>
+        (payload, pos,
+         "refine netcmd, refinement type"));
   read_node(payload, pos, node);
   assert_end_of_buffer(payload, pos, "refine cmd");
 }
@@ -468,14 +468,14 @@ netcmd::read_data_cmd(netcmd_item_type &
   u8 compressed_p = extract_datum_lsb<u8>(payload, pos,
                                           "data netcmd, compression flag");
   extract_variable_length_string(payload, dat, pos,
-                                  "data netcmd, data payload");
+                                 "data netcmd, data payload");
   if (compressed_p == 1)
-  {
-    gzip<data> zdat(dat, origin::network);
-    data tdat;
-    decode_gzip(zdat, tdat);
-    dat = tdat();
-  }
+    {
+      gzip<data> zdat(dat, origin::network);
+      data tdat;
+      decode_gzip(zdat, tdat);
+      dat = tdat();
+    }
   assert_end_of_buffer(payload, pos, "data netcmd payload");
 }
 
@@ -640,7 +640,7 @@ netcmd::read_automate_headers_reply_cmd(
 {
   size_t pos = 0;
   size_t nheaders = extract_datum_uleb128<size_t>(payload, pos,
-                                               "automate headers reply netcmd, count");
+                                                  "automate headers reply netcmd, count");
   headers.clear();
   for (size_t i = 0; i < nheaders; ++i)
     {
@@ -736,7 +736,7 @@ netcmd::read_automate_packet_cmd(int & c
   command_num = int(extract_datum_uleb128<size_t>(payload, pos,
                                                   "automate_packet netcmd, command_num"));
   stream = char(extract_datum_uleb128<size_t>(payload, pos,
-                                        "automate_packet netcmd, stream"));
+                                              "automate_packet netcmd, stream"));
   extract_variable_length_string(payload, packet_data, pos,
                                  "automate_packet netcmd, packet_data");
   assert_end_of_buffer(payload, pos, "automate_packet netcmd payload");
============================================================
--- src/netcmd.hh	6ac2e5bcf92deae29cbad403b4a0397f0a882734
+++ src/netcmd.hh	7979d0ab7dfd3877465cc36ae1848a139eba5630
@@ -32,7 +32,8 @@ class app_state;
 
 class app_state;
 
-namespace error_codes {
+namespace error_codes
+{
   static const int no_error = 200;
   static const int partial_transfer = 211;
   static const int no_transfer = 212;
@@ -49,62 +50,62 @@ typedef enum
 }
 
 typedef enum
-  {
-    server_voice,
-    client_voice
-  }
+{
+  server_voice,
+  client_voice
+}
 protocol_voice;
 
 typedef enum
-  {
-    source_role = 1,
-    sink_role = 2,
-    source_and_sink_role = 3
-  }
+{
+  source_role = 1,
+  sink_role = 2,
+  source_and_sink_role = 3
+}
 protocol_role;
 
 typedef enum
-  {
-    refinement_query = 0,
-    refinement_response = 1
-  }
+{
+  refinement_query = 0,
+  refinement_response = 1
+}
 refinement_type;
 
 typedef enum
-  {
-    // general commands
-    error_cmd = 0,
-    bye_cmd = 1,
+{
+  // general commands
+  error_cmd = 0,
+  bye_cmd = 1,
 
-    // authentication commands
-    hello_cmd = 2,
-    anonymous_cmd = 3,
-    auth_cmd = 4,
-    confirm_cmd = 5,
+  // authentication commands
+  hello_cmd = 2,
+  anonymous_cmd = 3,
+  auth_cmd = 4,
+  confirm_cmd = 5,
 
-    // refinement commands
-    refine_cmd = 6,
-    done_cmd = 7,
+  // refinement commands
+  refine_cmd = 6,
+  done_cmd = 7,
 
-    // transmission commands
-    data_cmd = 8,
-    delta_cmd = 9,
+  // transmission commands
+  data_cmd = 8,
+  delta_cmd = 9,
 
-    // automation commands
-    automate_cmd = 10,
-    automate_headers_request_cmd = 11,
-    automate_headers_reply_cmd = 12,
-    automate_command_cmd = 13,
-    automate_packet_cmd = 14,
+  // automation commands
+  automate_cmd = 10,
+  automate_headers_request_cmd = 11,
+  automate_headers_reply_cmd = 12,
+  automate_command_cmd = 13,
+  automate_packet_cmd = 14,
 
-    // usher commands
-    // usher_cmd is sent either by a proxy that needs to know where
-    // to forward a connection (the reply gives the desired hostname and
-    // include pattern), or by a server performing protocol
-    // version negotiation.
-    usher_cmd = 100,
-    usher_reply_cmd = 101
-  }
+  // usher commands
+  // usher_cmd is sent either by a proxy that needs to know where
+  // to forward a connection (the reply gives the desired hostname and
+  // include pattern), or by a server performing protocol
+  // version negotiation.
+  usher_cmd = 100,
+  usher_reply_cmd = 101
+}
 netcmd_code;
 
 class netcmd
@@ -128,7 +129,8 @@ public:
             string_queue & inbuf,
             chained_hmac & hmac);
   bool read_string(std::string & inbuf,
-                   chained_hmac & hmac) {
+                   chained_hmac & hmac)
+  {
     // this is here only for the regression tests because they want to
     // read and write to the same type, but we want to have reads from
     // a string queue so that when data is read in from the network it
@@ -137,7 +139,7 @@ public:
     tmp.append(inbuf);
     // allow any version
     bool ret = read(0, 255, tmp, hmac);
-    inbuf = tmp.substr(0,tmp.size());
+    inbuf = tmp.substr(0, tmp.size());
     return ret;
   }
   // i/o functions for each type of command payload
============================================================
--- src/netio.hh	05953ebcfb8b75c0357ef7866e122d4f68c3d90d
+++ src/netio.hh	df8eaeb31e5fb3368b99e55dd3500738e437ea7d
@@ -70,7 +70,7 @@ try_extract_datum_uleb128(std::string co
     {
       if (pos >= in.size())
         return false;
-      T curr = widen<T,u8>(in[pos]);
+      T curr = widen<T, u8>(in[pos]);
       ++pos;
       out |= ((static_cast<u8>(curr)
                & static_cast<u8>(0x7f)) << shift);
@@ -105,7 +105,7 @@ try_extract_datum_uleb128(string_queue c
     {
       if (pos >= in.size())
         return false;
-      T curr = widen<T,u8>(in[pos]);
+      T curr = widen<T, u8>(in[pos]);
       ++pos;
       out |= ((static_cast<u8>(curr)
                & static_cast<u8>(0x7f)) << shift);
@@ -204,7 +204,7 @@ extract_datum_lsb(std::string const & in
 
   while (nbytes > 0)
     {
-      out |= widen<T,u8>(in[pos++]) << shift;
+      out |= widen<T, u8>(in[pos++]) << shift;
       shift += 8;
       --nbytes;
     }
@@ -225,7 +225,7 @@ extract_datum_lsb(string_queue const & i
 
   while (nbytes > 0)
     {
-      out |= widen<T,u8>(in[pos++]) << shift;
+      out |= widen<T, u8>(in[pos++]) << shift;
       shift += 8;
       --nbytes;
     }
@@ -243,7 +243,7 @@ insert_datum_lsb(T in, std::string & out
       tmp[i] = static_cast<u8>(in) & static_cast<u8>(0xff);
       in >>= 8;
     }
-  out.append(std::string(tmp, tmp+nbytes));
+  out.append(std::string(tmp, tmp + nbytes));
 }
 
 template <typename T>
@@ -257,7 +257,7 @@ insert_datum_lsb(T in, string_queue & ou
       tmp[i] = static_cast<u8>(in) & static_cast<u8>(0xff);
       in >>= 8;
     }
-  out.append(std::string(tmp, tmp+nbytes));
+  out.append(std::string(tmp, tmp + nbytes));
 }
 
 inline void
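
[Editor's note: the netio.hh hunks above reformat the uleb128 extraction templates, where each byte contributes its low seven bits, least-significant group first, and the high bit marks continuation. Below is a reduced stand-alone sketch of that decoding loop; the function name and the bool-based error handling are simplifications for the example.]

// Illustrative sketch of uleb128 decoding as done by
// try_extract_datum_uleb128 above.
#include <cstddef>
#include <iostream>
#include <string>

static bool decode_uleb128(std::string const & in, std::size_t & pos,
                           unsigned long & out)
{
  out = 0;
  int shift = 0;
  while (true)
    {
      if (pos >= in.size())
        return false;                          // ran out of input mid-number
      unsigned char b = static_cast<unsigned char>(in[pos++]);
      out |= static_cast<unsigned long>(b & 0x7f) << shift;
      if ((b & 0x80) == 0)
        return true;                           // high bit clear: last byte
      shift += 7;
    }
}

int main()
{
  // 300 encodes as 0xAC 0x02: low seven bits 0101100 with the
  // continuation bit set, then 0000010.
  std::string buf("\xac\x02", 2);
  std::size_t pos = 0;
  unsigned long value = 0;
  if (decode_uleb128(buf, pos, value))
    std::cout << value << "\n";   // prints 300
  return 0;
}
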
============================================================
--- src/numeric_vocab.hh	857d25e240304dd485efc94ada24a1774eaf78d2
+++ src/numeric_vocab.hh	e9ed3d772acc4820f0ef6a88d20f060c93ee51b6
@@ -12,7 +12,7 @@
 
 #include <cstddef>
 #include <climits>              // Some architectures need this for CHAR_BIT
-                                // The lack of this was reported as bug #19984
+// The lack of this was reported as bug #19984
 #include <limits>
 #include <boost/static_assert.hpp>
 
@@ -58,16 +58,16 @@ widen(V const & v)
 // one of the users (the unit tests for dates.cc) to check it at runtime.
 
 #if defined LONG_MAX && LONG_MAX > UINT_MAX
-  #define PROBABLE_S64_MAX LONG_MAX
-  #define s64_C(x) x##L
+#define PROBABLE_S64_MAX LONG_MAX
+#define s64_C(x) x##L
 #elif defined LLONG_MAX && LLONG_MAX > UINT_MAX
-  #define PROBABLE_S64_MAX LLONG_MAX
-  #define s64_C(x) x##LL
+#define PROBABLE_S64_MAX LLONG_MAX
+#define s64_C(x) x##LL
 #elif defined LONG_LONG_MAX && LONG_LONG_MAX > UINT_MAX
-  #define PROBABLE_S64_MAX LONG_LONG_MAX
-  #define s64_C(x) x##LL
+#define PROBABLE_S64_MAX LONG_LONG_MAX
+#define s64_C(x) x##LL
 #else
-  #error "How do I write a constant of type s64?"
+#error "How do I write a constant of type s64?"
 #endif
 
 #endif
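
The s64_C macro selected above simply pastes whichever suffix makes a valid
64-bit signed literal on the host compiler; for example, assuming the s64
typedef this header provides (constant is illustrative):

  // expands to 86400L on an LP64 host, 86400LL where long is 32 bits
  static const s64 seconds_per_day = s64_C(86400);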
============================================================
--- src/mkstemp.cc	e4bc406d8843b959ece7978ea83fb29889157646
+++ src/mkstemp.cc	035e5522db72d472b4ad9db562f861349b626e7f
@@ -63,22 +63,22 @@
 #include <unistd.h>
 
 #ifdef _MSC_VER  // bleh, is this really necessary?
- #undef open
- #define open(p, f, m) _open(p, f, m)
- #undef close
- #define close(f) _close(f)
- #undef O_RDWR
- #define O_RDWR _O_RDWR
- #undef O_CREAT
- #define O_CREAT _O_CREAT
- #undef O_EXCL
- #define O_EXCL _O_EXCL
- #undef O_BINARY
- #define O_BINARY _O_BINARY
+#undef open
+#define open(p, f, m) _open(p, f, m)
+#undef close
+#define close(f) _close(f)
+#undef O_RDWR
+#define O_RDWR _O_RDWR
+#undef O_CREAT
+#define O_CREAT _O_CREAT
+#undef O_EXCL
+#define O_EXCL _O_EXCL
+#undef O_BINARY
+#define O_BINARY _O_BINARY
 #endif
 
 #ifndef O_BINARY
- #define O_BINARY 0
+#define O_BINARY 0
 #endif
 
 using std::string;
@@ -155,7 +155,7 @@ monotone_mkstemp(string & tmpl)
           x /= NLETTERS;
         }
 
-      int fd = open(&buf[0], O_RDWR|O_CREAT|O_EXCL|O_BINARY, 0600);
+      int fd = open(&buf[0], O_RDWR | O_CREAT | O_EXCL | O_BINARY, 0600);
       if (fd >= 0)
         {
           close(fd);
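
monotone_mkstemp above relies on the usual O_CREAT | O_EXCL idiom: fill in a
random suffix, try to create the file exclusively, and retry on a name
collision. A minimal standalone illustration of that idiom (POSIX open();
the helper name and retry limit are hypothetical, not monotone's):

  #include <cerrno>
  #include <cstddef>
  #include <cstdlib>
  #include <fcntl.h>
  #include <string>

  // Replace the trailing "XXXXXX" of tmpl with random letters and create the
  // file exclusively; returns an open fd, or -1 after too many collisions.
  inline int simple_mkstemp(std::string & tmpl)
  {
    static char const letters[] =
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
    size_t x = tmpl.size() - 6;        // position of the "XXXXXX" suffix
    for (int attempt = 0; attempt < 100; ++attempt)
      {
        for (size_t i = 0; i < 6; ++i)
          tmpl[x + i] = letters[rand() % (sizeof(letters) - 1)];
        int fd = open(tmpl.c_str(), O_RDWR | O_CREAT | O_EXCL, 0600);
        if (fd >= 0)
          return fd;                   // created; the name is now ours
        if (errno != EEXIST)
          return -1;                   // real error, stop retrying
      }
    return -1;
  }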
============================================================
--- src/platform.hh	4b86bdcb86f3976266a0292855ef4da2ca313271
+++ src/platform.hh	8e0d9691e67cfe829b5ccc49daaabff22640ffaa
@@ -19,19 +19,19 @@ void get_system_flavour(std::string & id
 
 void read_password(std::string const & prompt, char * buf, size_t bufsz);
 void get_system_flavour(std::string & ident);
-bool is_executable(const char *path);
+bool is_executable(const char * path);
 
 // For LUA
-int existsonpath(const char *exe);
-int set_executable(const char *path);
-int clear_executable(const char *path);
+int existsonpath(const char * exe);
+int set_executable(const char * path);
+int clear_executable(const char * path);
 pid_t process_spawn(const char * const argv[]);
 pid_t process_spawn_redirected(char const * in,
                                char const * out,
                                char const * err,
                                char const * const argv[]);
-pid_t process_spawn_pipe(char const * const argv[], FILE** in, FILE** out);
-int process_wait(pid_t pid, int *res, int timeout = -1);// default infinite
+pid_t process_spawn_pipe(char const * const argv[], FILE ** in, FILE ** out);
+int process_wait(pid_t pid, int * res, int timeout = -1); // default infinite
 int process_kill(pid_t pid, int signal);
 int process_sleep(unsigned int seconds);
 
@@ -39,7 +39,7 @@ void make_io_binary();
 void make_io_binary();
 
 #ifdef WIN32
-std::string munge_argv_into_cmdline(const char* const argv[]);
+std::string munge_argv_into_cmdline(const char * const argv[]);
 #endif
 // for term selection
 bool have_smart_terminal();
@@ -92,7 +92,7 @@ protected:
   virtual void note_nowish(bool f = true) = 0;
   virtual ~inodeprint_calculator() {};
 protected:
-  virtual void add_item(void *dat, size_t size) = 0;
+  virtual void add_item(void * dat, size_t size) = 0;
 };
 bool inodeprint_file(std::string const & file, inodeprint_calculator & calc);
 
@@ -169,7 +169,7 @@ std::string get_locale_dir();
 // throws on failure.
 //
 // This is strptime on Unix, something else on MinGW.
-void parse_date(const std::string s, const std::string fmt, struct tm *tp);
+void parse_date(const std::string s, const std::string fmt, struct tm * tp);
 
 #endif // __PLATFORM_HH__
 
============================================================
--- src/lcs.cc	a7b9e3b57dea4f4648ca2dd3de5cd301ec36802e
+++ src/lcs.cc	8916a27e95b7b0e935c27ffbb25f7a016b8e1a26
@@ -146,9 +146,9 @@ vector<long, QA(long)> work_vec::vec;
 
 vector<long, QA(long)> work_vec::vec;
 
-template <typename A,
-          typename B,
-          typename LCS>
+template < typename A,
+         typename B,
+         typename LCS >
 struct jaffer_edit_calculator
 {
 
@@ -183,9 +183,9 @@ struct jaffer_edit_calculator
     inline vt const & operator[](size_t idx) const
     {
       if (end < start)
-          return *(base + (start - (idx + 1)));
+        return *(base + (start - (idx + 1)));
       else
-          return *(base + (start + idx));
+        return *(base + (start + idx));
     }
   };
 
@@ -194,10 +194,10 @@ struct jaffer_edit_calculator
                   subarray<B> const & b, long b_len,
                   cost_vec & CC, long p)
   {
-    long cost = k + 2*p;
+    long cost = k + 2 * p;
 
     // do the run
-    long y = max(fp[k-1]+1, fp[k+1]);
+    long y = max(fp[k-1] + 1, fp[k+1]);
     long x = y - k;
 
     I(y >= 0);
@@ -231,7 +231,7 @@ struct jaffer_edit_calculator
                       bool full_scan = true)
   {
     long const delta = len_b - len_a;
-    long lo = -(len_a+1), hi = (1+len_b);
+    long lo = -(len_a + 1), hi = (1 + len_b);
     if (full_scan)
       {
         lo = -(p_lim + 1);
@@ -260,7 +260,7 @@ struct jaffer_edit_calculator
           break;
       }
 
-    return delta + 2*p;
+    return delta + 2 * p;
   }
 
   // This splits the edit graph into a top half and a bottom half, calculates
@@ -353,16 +353,16 @@ struct jaffer_edit_calculator
                         cost_vec const & cc,
                         long cost)
   {
-    long cdx = 1 + n/2;
-    long rdx = n/2;
+    long cdx = 1 + n / 2;
+    long rdx = n / 2;
     while (true)
       {
         I (rdx >= 0);
 
         if (cost == (cc[rdx] + rr[n-rdx]))
-            return rdx;
+          return rdx;
         if (cost == (cc[cdx] + rr[n-cdx]))
-            return cdx;
+          return cdx;
         --rdx;
         ++cdx;
       }
@@ -474,12 +474,12 @@ struct jaffer_edit_calculator
     long delta = (new_last_b - new_start_b) - (new_last_a - new_start_a);
 
     if (delta < 0)
-      return diff_to_ez (b, new_start_b, new_last_b+1,
-                         a, new_start_a, new_last_a+1,
+      return diff_to_ez (b, new_start_b, new_last_b + 1,
+                         a, new_start_a, new_last_a + 1,
                          edits, edx, -polarity, delta + p_lim);
     else
-      return diff_to_ez (a, new_start_a, new_last_a+1,
-                         b, new_start_b, new_last_b+1,
+      return diff_to_ez (a, new_start_a, new_last_a + 1,
+                         b, new_start_b, new_last_b + 1,
                          edits, edx, polarity, p_lim);
   }
 
@@ -526,7 +526,7 @@ struct jaffer_edit_calculator
                 for (long idx = bdx, edx = edx0;
                      idx < end_b;
                      ++idx, ++edx)
-                  edits[edx] = polarity * (idx+1);
+                  edits[edx] = polarity * (idx + 1);
 
                 return len_b - len_a;
               }
@@ -537,7 +537,7 @@ struct jaffer_edit_calculator
               }
             else
               {
-                edits[edx0] = polarity * (bdx+1);
+                edits[edx0] = polarity * (bdx + 1);
                 ++bdx; ++edx0;
               }
           }
@@ -609,15 +609,15 @@ struct jaffer_edit_calculator
 };
 
 
-template <typename A,
-          typename B,
-          typename LCS>
+template < typename A,
+         typename B,
+         typename LCS >
 void _edit_script(A begin_a, A end_a,
                   B begin_b, B end_b,
                   vector<long, QA(long)> & edits_out,
                   LCS ignored_out)
 {
-  typedef jaffer_edit_calculator<A,B,LCS> calc_t;
+  typedef jaffer_edit_calculator<A, B, LCS> calc_t;
   long len_a = end_a - begin_a;
   long len_b = end_b - begin_b;
   typename calc_t::edit_vec edits, ordered;
@@ -644,14 +644,14 @@ void _edit_script(A begin_a, A end_a,
 }
 
 
-template <typename A,
-          typename B,
-          typename LCS>
+template < typename A,
+         typename B,
+         typename LCS >
 void _longest_common_subsequence(A begin_a, A end_a,
                                  B begin_b, B end_b,
                                  LCS out)
 {
-  typedef jaffer_edit_calculator<A,B,LCS> calc_t;
+  typedef jaffer_edit_calculator<A, B, LCS> calc_t;
   long len_a = end_a - begin_a;
   long len_b = end_b - begin_b;
   typename calc_t::edit_vec edits, ordered;
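
The jaffer_edit_calculator reformatted above computes edit scripts with an
O(NP)-style sequence comparison (the fp[k] / delta + 2*p bookkeeping visible
in the hunks). For orientation only, the textbook dynamic-programming
definition of the longest-common-subsequence length (not the algorithm used
here) is:

  #include <algorithm>
  #include <cstddef>
  #include <string>
  #include <vector>

  // Classic O(n*m) DP: L[i][j] = LCS length of a[0..i) and b[0..j).
  inline size_t lcs_length(std::string const & a, std::string const & b)
  {
    std::vector<std::vector<size_t> >
      L(a.size() + 1, std::vector<size_t>(b.size() + 1, 0));
    for (size_t i = 1; i <= a.size(); ++i)
      for (size_t j = 1; j <= b.size(); ++j)
        L[i][j] = (a[i-1] == b[j-1])
                  ? L[i-1][j-1] + 1
                  : std::max(L[i-1][j], L[i][j-1]);
    return L[a.size()][b.size()];
  }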
============================================================
--- src/revision.hh	740c4dd4ee350fcf06af3ba707cef3dadecb46f8
+++ src/revision.hh	44d34f9c51f853422649de195ba7e592df4382fe
@@ -47,7 +47,7 @@ struct
 enum made_for { made_for_nobody, made_for_workspace, made_for_database };
 
 struct
-revision_t : public origin_aware
+  revision_t : public origin_aware
 {
   void check_sane() const;
   bool is_merge_node() const;
@@ -68,7 +68,7 @@ class graph_loader
 
 class graph_loader
 {
- public:
+public:
   graph_loader(database & db) : db(db) {}
 
   void load_parents(revision_id const rid, std::set<revision_id> & parents);
@@ -76,7 +76,7 @@ class graph_loader
   void load_ancestors(std::set<revision_id> & revs);
   void load_descendants(std::set<revision_id> & revs);
 
- private:
+private:
   database & db;
   enum load_direction { ancestors, descendants };
 
============================================================
--- src/basic_io.cc	95ae5bc17a2c7140bfe13691036591b8975bcf41
+++ src/basic_io.cc	71218bcaf90a2006d525e6415e630af029d6fcb8
@@ -81,7 +81,7 @@ void basic_io::stanza::push_hex_pair(sym
 {
   entries.push_back(make_pair(k, ""));
   string const & s(v());
-  entries.back().second.reserve(s.size()+2);
+  entries.back().second.reserve(s.size() + 2);
   entries.back().second.push_back('[');
   entries.back().second.append(s);
   entries.back().second.push_back(']');
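
push_hex_pair reserves s.size() + 2 characters because the value is stored
wrapped in square brackets. A self-contained sketch of the same formatting
(hypothetical helper, not monotone's API):

  #include <string>
  #include <utility>
  #include <vector>

  // Mimics stanza::push_hex_pair: store the value as "[<hex>]", so the key
  // "content" with a 40-character hash renders as: content [<40 hex digits>]
  inline void
  push_hex_pair_like(std::vector<std::pair<std::string, std::string> > & entries,
                     std::string const & key, std::string const & hex)
  {
    entries.push_back(std::make_pair(key, std::string()));
    entries.back().second.reserve(hex.size() + 2);   // '[' + hex + ']'
    entries.back().second.push_back('[');
    entries.back().second.append(hex);
    entries.back().second.push_back(']');
  }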
============================================================
--- src/basic_io.hh	64cbd940961328cf9c6a868e015424e35efad4cf
+++ src/basic_io.hh	39572b45cd5f6028b163236683399d422102e2e3
@@ -28,33 +28,33 @@ namespace basic_io
 {
 
   namespace
+  {
+    namespace syms
     {
-      namespace syms
-        {
-          // general format symbol
-          symbol const format_version("format_version");
+      // general format symbol
+      symbol const format_version("format_version");
 
-          // common symbols
-          symbol const dir("dir");
-          symbol const file("file");
-          symbol const content("content");
-          symbol const size("size");
-          symbol const attr("attr");
+      // common symbols
+      symbol const dir("dir");
+      symbol const file("file");
+      symbol const content("content");
+      symbol const size("size");
+      symbol const attr("attr");
 
-          symbol const content_mark("content_mark");
-        }
+      symbol const content_mark("content_mark");
     }
+  }
 
   typedef enum
-    {
-      TOK_SYMBOL,
-      TOK_STRING,
-      TOK_HEX,
-      TOK_NONE
-    } token_type;
+  {
+    TOK_SYMBOL,
+    TOK_STRING,
+    TOK_HEX,
+    TOK_NONE
+  } token_type;
 
   struct
-  input_source : public origin_aware
+    input_source : public origin_aware
   {
     size_t line, col;
     std::string const & in;
@@ -76,7 +76,7 @@ namespace basic_io
       if (LIKELY(curr != in.end()))
         // we do want to distinguish between EOF and '\xff',
         // so we translate '\xff' to 255u
-        lookahead = widen<unsigned int,char>(*curr);
+        lookahead = widen<unsigned int, char>(*curr);
       else
         lookahead = EOF;
     }
@@ -100,7 +100,7 @@ namespace basic_io
   };
 
   struct
-  tokenizer
+    tokenizer
   {
     input_source & in;
     std::string::const_iterator begin;
@@ -239,13 +239,13 @@ namespace basic_io
       else
         return basic_io::TOK_NONE;
     }
-   void err(std::string const & s);
+    void err(std::string const & s);
   };
 
   std::string escape(std::string const & s);
 
   struct
-  stanza
+    stanza
   {
     stanza();
     size_t indent;
@@ -254,7 +254,7 @@ namespace basic_io
     void push_hex_pair(symbol const & k, hexenc<id> const & v);
     void push_binary_pair(symbol const & k, id const & v);
     void push_binary_triple(symbol const & k, std::string const & n,
-                         id const & v);
+                            id const & v);
     void push_str_pair(symbol const & k, std::string const & v);
     void push_str_pair(symbol const & k, symbol const & v);
     void push_str_triple(symbol const & k, std::string const & n,
@@ -272,7 +272,7 @@ namespace basic_io
   // may be referenced (globally). An invariant will be triggered
   // if more than one basic_io::printer is instantiated.
   struct
-  printer
+    printer
   {
     static std::string buf;
     static int count;
@@ -282,7 +282,7 @@ namespace basic_io
   };
 
   struct
-  parser
+    parser
   {
     tokenizer & tok;
     parser(tokenizer & t) : tok(t)
============================================================
--- src/mt_version.cc	e73c0a25e76ba110976a4a0b94f39f5295c1dd75
+++ src/mt_version.cc	f0098d0c1b96e6719c2c52e0991fbd1d0263e6b8
@@ -76,7 +76,7 @@ get_full_version(string & out)
          % Botan::version_major() % Botan::version_minor() % Botan::version_patch()
          % BOTAN_VERSION_MAJOR % BOTAN_VERSION_MINOR % BOTAN_VERSION_PATCH
          % string(package_full_revision_constant))
-    .str();
+        .str();
 }
 
 void
============================================================
--- src/automate.cc	efa4ecceab7f1e31f71778f325da312bf6aefba5
+++ src/automate.cc	8d490e988b46a2751ab98037226a51b1822e03ae
@@ -136,19 +136,20 @@ CMD_AUTOMATE(ancestors, N_("REV1 [REV2 [
     {
       revision_id rid = frontier.back();
       frontier.pop_back();
-      if(!null_id(rid)) {
-        set<revision_id> parents;
-        db.get_revision_parents(rid, parents);
-        for (set<revision_id>::const_iterator i = parents.begin();
-             i != parents.end(); ++i)
-          {
-            if (ancestors.find(*i) == ancestors.end())
-              {
-                frontier.push_back(*i);
-                ancestors.insert(*i);
-              }
-          }
-      }
+      if(!null_id(rid))
+        {
+          set<revision_id> parents;
+          db.get_revision_parents(rid, parents);
+          for (set<revision_id>::const_iterator i = parents.begin();
+               i != parents.end(); ++i)
+            {
+              if (ancestors.find(*i) == ancestors.end())
+                {
+                  frontier.push_back(*i);
+                  ancestors.insert(*i);
+                }
+            }
+        }
     }
   for (set<revision_id>::const_iterator i = ancestors.begin();
        i != ancestors.end(); ++i)
@@ -378,7 +379,7 @@ CMD_AUTOMATE(roots, "",
   db.get_revision_children(nullid, roots);
   for (set<revision_id>::const_iterator i = roots.begin();
        i != roots.end(); ++i)
-      output << *i << '\n';
+    output << *i << '\n';
 }
 
 // Name: parents
@@ -408,8 +409,8 @@ CMD_AUTOMATE(parents, N_("REV"),
   db.get_revision_parents(rid, parents);
   for (set<revision_id>::const_iterator i = parents.begin();
        i != parents.end(); ++i)
-      if (!null_id(*i))
-          output << *i << '\n';
+    if (!null_id(*i))
+      output << *i << '\n';
 }
 
 // Name: children
@@ -439,8 +440,8 @@ CMD_AUTOMATE(children, N_("REV"),
   db.get_revision_children(rid, children);
   for (set<revision_id>::const_iterator i = children.begin();
        i != children.end(); ++i)
-      if (!null_id(*i))
-          output << *i << '\n';
+    if (!null_id(*i))
+      output << *i << '\n';
 }
 
 // Name: graph
@@ -486,13 +487,13 @@ CMD_AUTOMATE(graph, "",
       if (null_id(i->second))
         continue;
       map<revision_id, set<revision_id> >::iterator
-        j = child_to_parents.find(i->first);
+      j = child_to_parents.find(i->first);
       I(j->first == i->first);
       j->second.insert(i->second);
     }
 
   for (map<revision_id, set<revision_id> >::const_iterator
-         i = child_to_parents.begin();
+       i = child_to_parents.begin();
        i != child_to_parents.end(); ++i)
     {
       output << i->first;
@@ -693,7 +694,7 @@ inventory_determine_corresponding_paths(
                                         vector<file_path> & additional_excludes)
 {
   // at first check the includes vector
-  for (int i=0, s=includes.size(); i<s; i++)
+  for (int i = 0, s = includes.size(); i < s; i++)
     {
       file_path fp = includes.at(i);
 
@@ -730,7 +731,7 @@ inventory_determine_corresponding_paths(
 
   // and now the excludes vector
   vector<file_path> new_excludes;
-  for (int i=0, s=excludes.size(); i<s; i++)
+  for (int i = 0, s = excludes.size(); i < s; i++)
     {
       file_path fp = excludes.at(i);
 
@@ -864,19 +865,19 @@ inventory_determine_states(workspace & w
       item.new_node.exists &&
       item.old_node.id != item.new_node.id)
     {
-        if (new_roster.has_node(item.old_node.id))
-          states.push_back("rename_source");
-        else
-          states.push_back("dropped");
+      if (new_roster.has_node(item.old_node.id))
+        states.push_back("rename_source");
+      else
+        states.push_back("dropped");
 
-        if (old_roster.has_node(item.new_node.id))
-          states.push_back("rename_target");
-        else
-          states.push_back("added");
+      if (old_roster.has_node(item.new_node.id))
+        states.push_back("rename_target");
+      else
+        states.push_back("added");
     }
   // this can be either a drop or a renamed item
   else if (item.old_node.exists &&
-          !item.new_node.exists)
+           !item.new_node.exists)
     {
       if (new_roster.has_node(item.old_node.id))
         states.push_back("rename_source");
@@ -885,7 +886,7 @@ inventory_determine_states(workspace & w
     }
   // this can be either an add or a renamed item
   else if (!item.old_node.exists &&
-            item.new_node.exists)
+           item.new_node.exists)
     {
       if (old_roster.has_node(item.new_node.id))
         states.push_back("rename_target");
@@ -905,8 +906,8 @@ inventory_determine_states(workspace & w
           // file that is ignored but not in an ignored directory.
           if (work.ignore_file(fs_path))
             W(F("'%s' is both known and ignored; "
-              "it will be shown as 'missing'. Check '.mtn-ignore'")
-            % fs_path);
+                "it will be shown as 'missing'. Check '.mtn-ignore'")
+              % fs_path);
         }
     }
   else // exists on filesystem
@@ -1744,7 +1745,7 @@ CMD_AUTOMATE(common_ancestors, N_("REV1 
 
   for (set<revision_id>::const_iterator i = common_ancestors.begin();
        i != common_ancestors.end(); ++i)
-      output << *i << "\n";
+    output << *i << "\n";
 }
 
 // Name: branches
@@ -1824,10 +1825,11 @@ CMD_AUTOMATE(tags, N_("[BRANCH_PATTERN]"
   globish incl("*", origin::internal);
   bool filtering(false);
 
-  if (args.size() == 1) {
-    incl = globish(idx(args, 0)(), origin::user);
-    filtering = true;
-  }
+  if (args.size() == 1)
+    {
+      incl = globish(idx(args, 0)(), origin::user);
+      filtering = true;
+    }
 
   basic_io::printer prt;
 
@@ -1946,7 +1948,7 @@ CMD_AUTOMATE(get_content_changed, N_("RE
     F("no revision %s found in database") % ident);
   db.get_roster(ident, new_roster, mm);
 
-  file_path path = file_path_external(idx(args,1));
+  file_path path = file_path_external(idx(args, 1));
   E(new_roster.has_node(path), origin::user,
     F("file '%s' is unknown for revision %s")
     % path % ident);
@@ -1962,7 +1964,7 @@ CMD_AUTOMATE(get_content_changed, N_("RE
       st.push_binary_pair(basic_io::syms::content_mark, i->inner());
       prt.print_stanza(st);
     }
-    output.write(prt.buf.data(), prt.buf.size());
+  output.write(prt.buf.data(), prt.buf.size());
 }
 
 // Name: get_corresponding_path
@@ -2013,7 +2015,7 @@ CMD_AUTOMATE(get_corresponding_path, N_(
     F("no revision %s found in database") % old_ident);
   db.get_roster(old_ident, old_roster);
 
-  file_path path = file_path_external(idx(args,1));
+  file_path path = file_path_external(idx(args, 1));
   E(new_roster.has_node(path), origin::user,
     F("file '%s' is unknown for revision %s") % path % ident);
 
@@ -2385,7 +2387,7 @@ CMD_AUTOMATE(lua, "LUA_FUNCTION [ARG1 [A
   std::vector<std::string> func_args;
   if (args.size() > 1)
     {
-      for (unsigned int i=1; i<args.size(); i++)
+      for (unsigned int i = 1; i < args.size(); i++)
         {
           func_args.push_back(idx(args, i)());
         }
@@ -2476,7 +2478,7 @@ automate_stdio_shared_body(app_state & a
 std::pair<int, string> automate_stdio_helpers::
 automate_stdio_shared_body(app_state & app,
                            std::vector<std::string> const & cmdline,
-                           std::vector<std::pair<std::string,std::string> >
+                           std::vector<std::pair<std::string, std::string> >
                            const & params,
                            std::ostream & os,
                            boost::function<void()> init_fn,
============================================================
--- src/database_check.cc	b926df22bc2ec6134600d55072462c9a3df1b8b3
+++ src/database_check.cc	5dbb5cc92981d63f9c7dfdf2d43a3d5e71e042b1
@@ -49,7 +49,8 @@ using std::vector;
 using std::string;
 using std::vector;
 
-struct checked_cert {
+struct checked_cert
+{
   cert rcert;
   bool found_key;
   bool good_sig;
@@ -57,7 +58,8 @@ struct checked_cert {
   checked_cert(cert const & c): rcert(c), found_key(false), good_sig(false) {}
 };
 
-struct checked_key {
+struct checked_key
+{
   bool found;       // found public keypair id in db
   size_t sigs;                // number of signatures by this key
 
@@ -66,7 +68,8 @@ struct checked_key {
   checked_key(): found(false), sigs(0) {}
 };
 
-struct checked_file {
+struct checked_file
+{
   bool found;           // found in db, retrieved and verified sha1 hash
   bool size_ok;         // recorded file size is correct
   size_t roster_refs; // number of roster references to this file
@@ -74,7 +77,8 @@ struct checked_file {
   checked_file(): found(false), size_ok(false), roster_refs(0) {}
 };
 
-struct checked_roster {
+struct checked_roster
+{
   bool found;           // found in db, retrieved and verified sha1 hash
   size_t revision_refs; // number of revision references to this roster
   size_t missing_files; // number of missing files referenced by this roster
@@ -96,7 +100,8 @@ struct checked_roster {
 // of times it is listed as a child in the ancestry cache
 // (ancestry_child_refs)
 
-struct checked_revision {
+struct checked_revision
+{
   bool found;                  // found in db, retrieved and verified sha1 hash
   size_t revision_refs;        // number of references to this revision from other revisions
   size_t ancestry_parent_refs; // number of references to this revision by ancestry parent
@@ -128,14 +133,16 @@ struct checked_revision {
     cert_refs(0), parseable(false), normalized(false) {}
 };
 
-struct checked_height {
+struct checked_height
+{
   bool found;                  // found in db
   bool unique;                 // not identical to any height retrieved earlier
   bool sensible;               // greater than all parent heights
   checked_height(): found(false), unique(false), sensible(true) {}
 };
 
-struct checked_branch {
+struct checked_branch
+{
   bool used;
   bool heads_ok;
   bool cached;
@@ -149,9 +156,9 @@ check_db_integrity_check(database & db)
 static void
 check_db_integrity_check(database & db)
 {
-    L(FL("asking sqlite to check db integrity"));
-    E(db.check_integrity(), origin::database,
-      F("file structure is corrupted; cannot check further"));
+  L(FL("asking sqlite to check db integrity"));
+  E(db.check_integrity(), origin::database,
+    F("file structure is corrupted; cannot check further"));
 }
 
 static void
@@ -162,7 +169,7 @@ check_files(database & db, map<file_id, 
   db.get_file_ids(files);
   L(FL("checking %d files") % files.size());
 
-  ticker ticks(_("files"), "f", files.size()/70+1);
+  ticker ticks(_("files"), "f", files.size() / 70 + 1);
 
   for (set<file_id>::const_iterator i = files.begin();
        i != files.end(); ++i)
@@ -205,7 +212,7 @@ check_rosters_manifest(database & db,
   db.get_roster_ids(rosters);
   L(FL("checking %d rosters, manifest pass") % rosters.size());
 
-  ticker ticks(_("rosters"), "r", rosters.size()/70+1);
+  ticker ticks(_("rosters"), "r", rosters.size() / 70 + 1);
 
   for (set<revision_id>::const_iterator i = rosters.begin();
        i != rosters.end(); ++i)
@@ -259,12 +266,12 @@ check_rosters_marking(database & db,
 // This function assumes that check_revisions has been called!
 static void
 check_rosters_marking(database & db,
-              map<revision_id, checked_roster> & checked_rosters,
-              map<revision_id, checked_revision> & checked_revisions)
+                      map<revision_id, checked_roster> & checked_rosters,
+                      map<revision_id, checked_revision> & checked_revisions)
 {
   L(FL("checking %d rosters, marking pass") % checked_rosters.size());
 
-  ticker ticks(_("markings"), "m", checked_rosters.size()/70+1);
+  ticker ticks(_("markings"), "m", checked_rosters.size() / 70 + 1);
 
   for (map<revision_id, checked_roster>::const_iterator i
        = checked_rosters.begin(); i != checked_rosters.end(); i++)
@@ -272,7 +279,7 @@ check_rosters_marking(database & db,
       revision_id ros_id = i->first;
       L(FL("checking roster %s") % i->first);
       if (!i->second.found)
-          continue;
+        continue;
 
       // skip marking check on unreferenced rosters -- they're left by
       // kill_rev_locally, and not expected to have everything they
@@ -311,7 +318,7 @@ check_rosters_marking(database & db,
                 checked_rosters[ros_id].missing_mark_revs++;
             }
 
-          for (map<attr_key,set<revision_id> >::const_iterator attr =
+          for (map<attr_key, set<revision_id> >::const_iterator attr =
                  mark->attrs.begin(); attr != mark->attrs.end(); attr++)
             for (set<revision_id>::const_iterator r = attr->second.begin();
                  r != attr->second.end(); r++)
@@ -337,7 +344,7 @@ check_revisions(database & db,
   db.get_revision_ids(revisions);
   L(FL("checking %d revisions") % revisions.size());
 
-  ticker ticks(_("revisions"), "r", revisions.size()/70+1);
+  ticker ticks(_("revisions"), "r", revisions.size() / 70 + 1);
 
   for (set<revision_id>::const_iterator i = revisions.begin();
        i != revisions.end(); ++i)
@@ -367,7 +374,7 @@ check_revisions(database & db,
       write_revision(rev, norm_data);
       calculate_ident(norm_data, norm_ident);
       if (norm_ident == *i)
-          checked_revisions[*i].normalized = true;
+        checked_revisions[*i].normalized = true;
 
       // roster checks
       if (db.roster_version_exists(*i))
@@ -408,7 +415,7 @@ check_revisions(database & db,
   // now check for parent revision existence and problems
 
   for (map<revision_id, checked_revision>::iterator
-         revision = checked_revisions.begin();
+       revision = checked_revisions.begin();
        revision != checked_revisions.end(); ++revision)
     {
       for (set<revision_id>::const_iterator p = revision->second.parents.begin();
@@ -433,7 +440,7 @@ check_ancestry(database & db,
   db.get_forward_ancestry(graph);
   L(FL("checking %d ancestry edges") % graph.size());
 
-  ticker ticks(_("ancestry"), "a", graph.size()/70+1);
+  ticker ticks(_("ancestry"), "a", graph.size() / 70 + 1);
 
   // checked revision has set of parents
   // graph has revision and associated parents
@@ -491,7 +498,7 @@ check_certs(database & db,
 
   L(FL("checking %d revision certs") % certs.size());
 
-  ticker ticks(_("certs"), "c", certs.size()/70+1);
+  ticker ticks(_("certs"), "c", certs.size() / 70 + 1);
 
   for (vector<cert>::const_iterator i = certs.begin();
        i != certs.end(); ++i)
@@ -504,7 +511,7 @@ check_certs(database & db,
           string signed_text;
           i->signable_text(signed_text);
           checked.good_sig
-            = (db.check_signature(i->key, signed_text, i->sig) == cert_ok);
+          = (db.check_signature(i->key, signed_text, i->sig) == cert_ok);
         }
 
       checked_keys[i->key].sigs++;
@@ -534,7 +541,7 @@ check_heights(database & db,
 
   set<rev_height> seen;
 
-  ticker ticks(_("heights"), "h", heights.size()/70+1);
+  ticker ticks(_("heights"), "h", heights.size() / 70 + 1);
 
   for (set<revision_id>::const_iterator i = heights.begin();
        i != heights.end(); ++i)
@@ -578,7 +585,7 @@ check_heights_relation(database & db,
 
   L(FL("checking heights for %d edges") % graph.size());
 
-  ticker ticks(_("height relations"), "h", graph.size()/70+1);
+  ticker ticks(_("height relations"), "h", graph.size() / 70 + 1);
 
   typedef multimap<revision_id, revision_id>::const_iterator gi;
   for (gi i = graph.begin(); i != graph.end(); ++i)
@@ -695,7 +702,7 @@ report_files(map<file_id, checked_file> 
              size_t & missing_or_invalid_file_sizes)
 {
   for (map<file_id, checked_file>::const_iterator
-         i = checked_files.begin(); i != checked_files.end(); ++i)
+       i = checked_files.begin(); i != checked_files.end(); ++i)
     {
       checked_file file = i->second;
 
@@ -722,11 +729,11 @@ report_rosters(map<revision_id, checked_
 
 static void
 report_rosters(map<revision_id, checked_roster> const & checked_rosters,
-                 size_t & unreferenced_rosters,
-                 size_t & incomplete_rosters)
+               size_t & unreferenced_rosters,
+               size_t & incomplete_rosters)
 {
   for (map<revision_id, checked_roster>::const_iterator
-         i = checked_rosters.begin(); i != checked_rosters.end(); ++i)
+       i = checked_rosters.begin(); i != checked_rosters.end(); ++i)
     {
       checked_roster roster = i->second;
 
@@ -765,7 +772,7 @@ report_revisions(map<revision_id, checke
                  size_t & non_normalized_revisions)
 {
   for (map<revision_id, checked_revision>::const_iterator
-         i = checked_revisions.begin(); i != checked_revisions.end(); ++i)
+       i = checked_revisions.begin(); i != checked_revisions.end(); ++i)
     {
       checked_revision revision = i->second;
 
@@ -867,7 +874,7 @@ report_keys(map<key_id, checked_key> con
             size_t & missing_keys)
 {
   for (map<key_id, checked_key>::const_iterator
-         i = checked_keys.begin(); i != checked_keys.end(); ++i)
+       i = checked_keys.begin(); i != checked_keys.end(); ++i)
     {
       checked_key key = i->second;
 
@@ -902,7 +909,7 @@ report_certs(map<revision_id, checked_re
   cnames.insert(cert_name(date_cert_name));
 
   for (map<revision_id, checked_revision>::const_iterator
-         i = checked_revisions.begin(); i != checked_revisions.end(); ++i)
+       i = checked_revisions.begin(); i != checked_revisions.end(); ++i)
     {
       checked_revision revision = i->second;
       map<cert_name, size_t> cert_counts;
@@ -963,7 +970,7 @@ report_heights(map<revision_id, checked_
                size_t & incorrect_heights)
 {
   for (map<revision_id, checked_height>::const_iterator
-         i = checked_heights.begin(); i != checked_heights.end(); ++i)
+       i = checked_heights.begin(); i != checked_heights.end(); ++i)
     {
       checked_height height = i->second;
 
@@ -1168,32 +1175,32 @@ check_db(database & db)
     W(F("%d branches missing from branch cache") % missing_branches);
 
   size_t total = missing_files + unreferenced_files +
-    missing_or_invalid_file_sizes +
-    unreferenced_rosters + incomplete_rosters +
-    missing_revisions + incomplete_revisions +
-    non_parseable_revisions + non_normalized_revisions +
-    mismatched_parents + mismatched_children +
-    bad_history +
-    missing_rosters +
-    missing_certs + mismatched_certs +
-    unchecked_sigs + bad_sigs +
-    missing_keys +
-    missing_heights + duplicate_heights + incorrect_heights +
-    extra_branches + bad_branches + missing_branches;
+                 missing_or_invalid_file_sizes +
+                 unreferenced_rosters + incomplete_rosters +
+                 missing_revisions + incomplete_revisions +
+                 non_parseable_revisions + non_normalized_revisions +
+                 mismatched_parents + mismatched_children +
+                 bad_history +
+                 missing_rosters +
+                 missing_certs + mismatched_certs +
+                 unchecked_sigs + bad_sigs +
+                 missing_keys +
+                 missing_heights + duplicate_heights + incorrect_heights +
+                 extra_branches + bad_branches + missing_branches;
 
   // unreferenced files and rosters and mismatched certs are not actually
   // serious errors; odd, but nothing will break.
   size_t serious = missing_files + missing_or_invalid_file_sizes +
-    incomplete_rosters + missing_rosters +
-    missing_revisions + incomplete_revisions +
-    non_parseable_revisions + non_normalized_revisions +
-    mismatched_parents + mismatched_children + manifest_mismatch +
-    bad_history +
-    missing_certs +
-    unchecked_sigs + bad_sigs +
-    missing_keys +
-    missing_heights + duplicate_heights + incorrect_heights+
-    extra_branches + bad_branches + missing_branches;
+                   incomplete_rosters + missing_rosters +
+                   missing_revisions + incomplete_revisions +
+                   non_parseable_revisions + non_normalized_revisions +
+                   mismatched_parents + mismatched_children + manifest_mismatch +
+                   bad_history +
+                   missing_certs +
+                   unchecked_sigs + bad_sigs +
+                   missing_keys +
+                   missing_heights + duplicate_heights + incorrect_heights +
+                   extra_branches + bad_branches + missing_branches;
 
   P(F("check complete: %d files; %d rosters; %d revisions; %d keys; %d certs; %d heights; %d branches")
     % checked_files.size()
============================================================
--- src/inodeprint.cc	aa6821e0691244214d92f7a911daf130daf52f6d
+++ src/inodeprint.cc	21cdfd976d545b141021daf701d3caa723ed306b
@@ -111,7 +111,7 @@ class my_iprint_calc : public inodeprint
   std::string res;
   Botan::SHA_160 hash;
   bool too_close;
-  void add_item(void *dat, size_t size)
+  void add_item(void * dat, size_t size)
   {
     hash.update(reinterpret_cast<Botan::byte const *>(&size),
                 sizeof(size));
============================================================
--- src/inodeprint.hh	7def309187f69a62288c27063692f9f85d4cb848
+++ src/inodeprint.hh	31afe053768a80cdaf82453f4147ea5cdb762924
@@ -18,9 +18,9 @@ typedef std::pair<file_path const, hexen
 
 typedef std::pair<file_path const, hexenc<inodeprint> > inodeprint_entry;
 
-typedef std::map<file_path, hexenc<inodeprint>,
-                 std::less<file_path>,
-                 QA(inodeprint_entry) > inodeprint_map;
+typedef std::map < file_path, hexenc<inodeprint>,
+        std::less<file_path>,
+        QA(inodeprint_entry) > inodeprint_map;
 
 std::ostream & operator<<(std::ostream & out, inodeprint_entry const & e);
 
@@ -40,9 +40,9 @@ inodeprint_unchanged(inodeprint_map cons
     {
       hexenc<inodeprint> ip;
       if (inodeprint_file(path, ip) && ip == old_ip->second)
-          return true; // unchanged
+        return true; // unchanged
       else
-          return false; // changed or unavailable
+        return false; // changed or unavailable
     }
   else
     return false; // unavailable
============================================================
--- src/annotate.cc	7defae118c92570f6b8adccffd17bd913ecbf041
+++ src/annotate.cc	20967a20c46eff02daaebfe13ad5acad62365b3e
@@ -238,7 +238,7 @@ annotate_context::initial_lineage() cons
 annotate_context::initial_lineage() const
 {
   shared_ptr<annotate_lineage_mapping>
-    res(new annotate_lineage_mapping(file_lines));
+  res(new annotate_lineage_mapping(file_lines));
   return res;
 }
 
@@ -317,7 +317,7 @@ annotate_context::annotate_equivalent_li
 {
   revision_id null_id;
 
-  for (size_t i=0; i<annotations.size(); i++)
+  for (size_t i = 0; i < annotations.size(); i++)
     {
       if (annotations[i] == null_id)
         {
@@ -379,10 +379,10 @@ cert_date_value(vector<cert> const & cer
                 bool from_start, bool from_end,
                 string const & fmt)
 {
-    string certval = cert_string_value(certs, name, from_start, from_end, "");
-    if (fmt.empty() || certval.empty())
-        return certval;
-    return date_t(certval).as_formatted_localtime(fmt);
+  string certval = cert_string_value(certs, name, from_start, from_end, "");
+  if (fmt.empty() || certval.empty())
+    return certval;
+  return date_t(certval).as_formatted_localtime(fmt);
 }
 
 void
@@ -543,7 +543,7 @@ annotate_lineage_mapping::init_with_line
 
   int count;
   vector<string>::const_iterator i;
-  for (count=0, i = lines.begin(); i != lines.end(); i++, count++)
+  for (count = 0, i = lines.begin(); i != lines.end(); i++, count++)
     {
       file_interned.push_back(in.intern(*i));
       mapping.push_back(count);
@@ -560,7 +560,7 @@ annotate_lineage_mapping::build_parent_l
 {
   bool verbose = false;
   shared_ptr<annotate_lineage_mapping>
-    parent_lineage(new annotate_lineage_mapping(parent_data));
+  parent_lineage(new annotate_lineage_mapping(parent_data));
 
   vector<long, QA(long)> lcs;
   back_insert_iterator< vector<long, QA(long)> > bii(lcs);
@@ -660,7 +660,7 @@ annotate_lineage_mapping::merge(annotate
   I(mapping.size() == other.mapping.size());
   //I(equal_interned(other)); // expensive check
 
-  for (size_t i=0; i<mapping.size(); i++)
+  for (size_t i = 0; i < mapping.size(); i++)
     {
       if (mapping[i] == -1 && other.mapping[i] >= 0)
         mapping[i] = other.mapping[i];
@@ -686,7 +686,7 @@ annotate_lineage_mapping::credit_mapped_
 (shared_ptr<annotate_context> acp) const
 {
   vector<int>::const_iterator i;
-  for (i=mapping.begin(); i != mapping.end(); i++)
+  for (i = mapping.begin(); i != mapping.end(); i++)
     {
       acp->set_touched(*i);
     }
@@ -697,7 +697,7 @@ annotate_lineage_mapping::set_copied_all
 (shared_ptr<annotate_context> acp) const
 {
   vector<int>::const_iterator i;
-  for (i=mapping.begin(); i != mapping.end(); i++)
+  for (i = mapping.begin(); i != mapping.end(); i++)
     {
       acp->set_copied(*i);
     }
@@ -784,9 +784,9 @@ do_annotate_node(database & db,
           L(FL("building parent lineage for parent file %s")
             % file_in_parent);
           parent_lineage
-            = work_unit.lineage->build_parent_lineage(work_unit.annotations,
-                                                      parent_revision,
-                                                      data);
+          = work_unit.lineage->build_parent_lineage(work_unit.annotations,
+                                                    parent_revision,
+                                                    data);
         }
 
       // If this parent has not yet been queued for processing, create the
@@ -849,10 +849,10 @@ do_annotate (app_state & app, project_t 
     % file_node->self % file_node->content % rid);
 
   shared_ptr<annotate_context>
-    acp(new annotate_context(app, project, file_node->content));
+  acp(new annotate_context(app, project, file_node->content));
 
   shared_ptr<annotate_lineage_mapping> lineage
-    = acp->initial_lineage();
+  = acp->initial_lineage();
 
   work_units work_units;
   {
============================================================
--- src/selectors.cc	3efba0ce3139290b4088cea0d6e82a912565471e
+++ src/selectors.cc	daf19249fc9a9723bb4a07f2d418a86b3d9def9f
@@ -160,11 +160,11 @@ string preprocess_date_for_selector(stri
   // for searching a specific date cert this makes no sense
   // FIXME: this is highly speculative if expand_date wasn't called
   // beforehand - tmp could be _anything_ but a partial date string
-  if (tmp.size()<8 && !equals)
+  if (tmp.size() < 8 && !equals)
     tmp += "-01T00:00:00";
-  else if (tmp.size()<11 && !equals)
+  else if (tmp.size() < 11 && !equals)
     tmp += "T00:00:00";
-  E(tmp.size()==19 || equals, origin::user,
+  E(tmp.size() == 19 || equals, origin::user,
     F("selector '%s' is not a valid date (internally completed to '%s')") % sel % tmp);
 
   if (sel != tmp)
@@ -496,7 +496,7 @@ diagnose_wrong_arg_count(string const & 
     FP("the '%s' function takes %d argument, not %d",
        "the '%s' function takes %d arguments, not %d",
        expected)
-      % func % expected % actual);
+    % func % expected % actual);
 }
 
 class fn_selector : public selector
@@ -656,7 +656,7 @@ selector::create_simple_selector(options
   if (sel.size() < 2 || sel[1] != ':')
     return shared_ptr<selector>(new unknown_selector(sel));
   char sel_type = sel[0];
-  sel.erase(0,2);
+  sel.erase(0, 2);
   switch (sel_type)
     {
     case 'a':
@@ -729,7 +729,7 @@ shared_ptr<selector> selector::create(op
           I(!val2.empty());
           E(special_chars.find(val2) != string::npos, origin::user,
             F("selector '%s' is invalid, it contains an unknown escape sequence '%s%s'")
-            % val % '\\' % val2.substr(0,1));
+            % val % '\\' % val2.substr(0, 1));
           splitted.back().append(val2);
 
           ++iter;
@@ -758,73 +758,84 @@ shared_ptr<selector> selector::create(op
     {
       L(FL("Processing token number %d: '%s'") % tok_num % *tok);
       ++tok_num;
-      if (*tok == "(") {
-        items.push_back(parse_item(*tok));
-      } else if (*tok == ")") {
-        unsigned int lparen_pos = 1;
-        while (lparen_pos <= items.size() && idx(items, items.size() - lparen_pos).str != "(")
-          {
-            ++lparen_pos;
-          }
-        E(lparen_pos < items.size(), origin::user,
-          F("selector '%s' is invalid, unmatched ')'") % orig);
-        I(idx(items, items.size() - lparen_pos).str == "(");
-        unsigned int name_idx = items.size() - lparen_pos - 1;
-        if (lparen_pos < items.size() && !idx(items, name_idx).str.empty()
-            && special_chars.find(idx(items, name_idx).str) == string::npos)
-          {
-            // looks like a function call
-            shared_ptr<fn_selector> to_add(new fn_selector(idx(items, name_idx).str));
-            L(FL("found function-like selector '%s' at stack position %d of %d")
-              % items[name_idx].str % name_idx % items.size());
-            // note the closing paren is not on the item stack
-            for (unsigned int i = items.size() - lparen_pos + 1;
-                 i < items.size(); i += 2)
-              {
-                L(FL("        found argument at stack position %d") % i);
-                shared_ptr<selector> arg = idx(items,i).sel;
-                E(i == items.size() - 1 || idx(items,i+1).str == ";", origin::user,
-                  F("selector '%s' is invalid, function argument doesn't look like an arg-list"));
-                to_add->add(arg);
-              }
-            while (name_idx < items.size())
+      if (*tok == "(")
+        {
+          items.push_back(parse_item(*tok));
+        }
+      else if (*tok == ")")
+        {
+          unsigned int lparen_pos = 1;
+          while (lparen_pos <= items.size() && idx(items, items.size() - lparen_pos).str != "(")
+            {
+              ++lparen_pos;
+            }
+          E(lparen_pos < items.size(), origin::user,
+            F("selector '%s' is invalid, unmatched ')'") % orig);
+          I(idx(items, items.size() - lparen_pos).str == "(");
+          unsigned int name_idx = items.size() - lparen_pos - 1;
+          if (lparen_pos < items.size() && !idx(items, name_idx).str.empty()
+              && special_chars.find(idx(items, name_idx).str) == string::npos)
+            {
+              // looks like a function call
+              shared_ptr<fn_selector> to_add(new fn_selector(idx(items, name_idx).str));
+              L(FL("found function-like selector '%s' at stack position %d of %d")
+                % items[name_idx].str % name_idx % items.size());
+              // note the closing paren is not on the item stack
+              for (unsigned int i = items.size() - lparen_pos + 1;
+                   i < items.size(); i += 2)
+                {
+                  L(FL("        found argument at stack position %d") % i);
+                  shared_ptr<selector> arg = idx(items, i).sel;
+                  E(i == items.size() - 1 || idx(items, i + 1).str == ";", origin::user,
+                    F("selector '%s' is invalid, function argument doesn't look like an arg-list"));
+                  to_add->add(arg);
+                }
+              while (name_idx < items.size())
+                items.pop_back();
+              items.push_back(parse_item(to_add));
+            }
+          else
+            {
+              // just parentheses for grouping, closing paren is not on the item stack
+              E(lparen_pos == 2 && idx(items, items.size() - 1).sel, origin::user,
+                F("selector '%s' is invalid, grouping parentheses contain something that "
+                  "doesn't look like an expr") % orig);
+              shared_ptr<selector> to_add(new nested_selector(idx(items, items.size() - 1).sel));
               items.pop_back();
-            items.push_back(parse_item(to_add));
-          }
-        else
-          {
-            // just parentheses for grouping, closing paren is not on the item stack
-            E(lparen_pos == 2 && idx(items, items.size() - 1).sel, origin::user,
-              F("selector '%s' is invalid, grouping parentheses contain something that "
-                "doesn't look like an expr") % orig);
-            shared_ptr<selector> to_add(new nested_selector(idx(items, items.size() - 1).sel));
-            items.pop_back();
-            items.pop_back();
-            items.push_back(parse_item(to_add));
-          }
-      } else if (*tok == ";") {
-        items.push_back(parse_item(*tok));
-      } else if (*tok == "/") {
-        E(!items.empty(), origin::user,
-          F("selector '%s' is invalid, because it starts with a '/'") % orig);
-        items.push_back(parse_item(*tok));
-      } else if (*tok == "|") {
-        E(!items.empty(), origin::user,
-          F("selector '%s' is invalid, because it starts with a '|'") % orig);
-        items.push_back(parse_item(*tok));
-      } else {
-        vector<string>::const_iterator next = tok;
-        ++next;
-        bool next_is_oparen = false;
-        if (next != splitted.end())
-          next_is_oparen = (*next == "(");
-        if (next_is_oparen)
+              items.pop_back();
+              items.push_back(parse_item(to_add));
+            }
+        }
+      else if (*tok == ";")
+        {
           items.push_back(parse_item(*tok));
-        else
-          items.push_back(parse_item(create_simple_selector(opts, lua,
-                                                            project,
-                                                            *tok)));
-      }
+        }
+      else if (*tok == "/")
+        {
+          E(!items.empty(), origin::user,
+            F("selector '%s' is invalid, because it starts with a '/'") % orig);
+          items.push_back(parse_item(*tok));
+        }
+      else if (*tok == "|")
+        {
+          E(!items.empty(), origin::user,
+            F("selector '%s' is invalid, because it starts with a '|'") % orig);
+          items.push_back(parse_item(*tok));
+        }
+      else
+        {
+          vector<string>::const_iterator next = tok;
+          ++next;
+          bool next_is_oparen = false;
+          if (next != splitted.end())
+            next_is_oparen = (*next == "(");
+          if (next_is_oparen)
+            items.push_back(parse_item(*tok));
+          else
+            items.push_back(parse_item(create_simple_selector(opts, lua,
+                                                              project,
+                                                              *tok)));
+        }
 
       // may have an infix operator to reduce
       if (items.size() >= 3 && items.back().sel)
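
The date-selector preprocessing near the top of this file's hunks pads a
partial date out to the full 19-character timestamp before comparing; a
worked example of that completion (dates are illustrative):

  //  "2007-03"              (7 chars, < 8)   ->  "2007-03-01T00:00:00"
  //  "2007-03-25"           (10 chars, < 11) ->  "2007-03-25T00:00:00"
  //  "2007-03-25T12:30:00"  (19 chars)           accepted unchanged
  //  anything that does not complete to exactly 19 characters is rejected
  //  (unless the 'equals' flag is set, in which case the string is kept as-is)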
============================================================
--- src/restrictions.cc	211bf4562ebbe67950b900a45a28bf0c91395439
+++ src/restrictions.cc	dbcf9e72fc438f1ac922d2d8b3917610fa4e6e53
@@ -29,10 +29,10 @@ typedef map<node_id, restricted_path::st
 // include these nodes.
 
 typedef map<node_id, restricted_path::status>::const_iterator
-        node_status_iterator;
+node_status_iterator;
 
 typedef map<file_path, restricted_path::status>::const_iterator
-        path_status_iterator;
+path_status_iterator;
 
 typedef set<file_path>::const_iterator path_iterator;
 
============================================================
--- src/restrictions.hh	6cc851724aa78f04c22327a1a82dfe4facbca43a
+++ src/restrictions.hh	5a4b84eb11595378823d7e654c4d235294c8758e
@@ -57,28 +57,28 @@ namespace restricted_path
 namespace restricted_path
 {
   enum status
-    {
-      included,
-      excluded,
-      required,
-      included_required,
-      excluded_required
-    };
+  {
+    included,
+    excluded,
+    required,
+    included_required,
+    excluded_required
+  };
 }
 
 class restriction
 {
- public:
+public:
   bool empty() const
   { return included_paths.empty() && excluded_paths.empty(); }
 
   enum include_rules
-    {
-      explicit_includes,
-      implicit_includes
-    };
+  {
+    explicit_includes,
+    implicit_includes
+  };
 
- protected:
+protected:
   restriction() : depth(-1) {}
 
   restriction(std::vector<file_path> const & includes,
@@ -91,7 +91,7 @@ class node_restriction : public restrict
 
 class node_restriction : public restriction
 {
- public:
+public:
   node_restriction() : restriction() {}
 
   node_restriction(std::vector<file_path> const & includes,
@@ -132,14 +132,14 @@ class node_restriction : public restrict
     return *this;
   }
 
- private:
+private:
   std::set<file_path> known_paths;
   std::map<node_id, restricted_path::status> node_map;
 };
 
 class path_restriction : public restriction
 {
- public:
+public:
   enum skip_check_t { skip_check };
 
   path_restriction() : restriction() {}
@@ -158,7 +158,7 @@ class path_restriction : public restrict
 
   bool includes(file_path const & sp) const;
 
- private:
+private:
   std::map<file_path, restricted_path::status> path_map;
 };
 
============================================================
--- src/globish.cc	05742d8536befa06b6edfb9a85308e45005375c4
+++ src/globish.cc	34b3abdc8e3423e6ca1b9a275ef8a1f14d045b42
@@ -29,7 +29,8 @@ using std::back_insert_iterator;
 // control-character range.  This is why bytes <= 0x1f are not allowed in the
 // pattern.
 
-enum metachar {
+enum metachar
+{
   META_STAR = 1,   // *
   META_QUES,       // ?
   META_CC_BRA,     // [
@@ -73,7 +74,7 @@ compile_charclass(string const & pat, st
       // A dash at the beginning or end of the pattern is literal.
       else if (*p == '-'
                && !in_class.empty()
-               && p+1 != pat.end()
+               && p + 1 != pat.end()
                && p[1] != ']')
         {
           p++;
@@ -103,7 +104,7 @@ compile_charclass(string const & pat, st
               "in classes") % pat);
 
           L(FL("expanding range from %X (%c) to %X (%c)")
-            % (start+1) % (char)(start+1) % stop % (char)stop);
+            % (start + 1) % (char)(start + 1) % stop % (char)stop);
 
           for (unsigned int r = start + 1; r < stop; r++)
             in_class.push_back((char)r);
@@ -301,7 +302,7 @@ decode(string::const_iterator p, string:
       case META_CC_BRA:     s.push_back('['); break;
       case META_CC_KET:     s.push_back(']'); break;
       case META_CC_INV_BRA: s.push_back('[');
-                            s.push_back('!'); break;
+        s.push_back('!'); break;
 
       case META_ALT_BRA:    s.push_back('{'); break;
       case META_ALT_KET:    s.push_back('}'); break;
@@ -348,7 +349,7 @@ globish::contains_meta_chars() const
       case META_ALT_BRA:
       case META_ALT_KET:
       case META_ALT_OR:
-          return true;
+        return true;
       }
   return false;
 }
@@ -367,8 +368,8 @@ find_next_subpattern(string::const_itera
 
 static string::const_iterator
 find_next_subpattern(string::const_iterator p,
-                       string::const_iterator pe,
-                       bool want_alternatives)
+                     string::const_iterator pe,
+                     bool want_alternatives)
 {
   L(FL("Finding subpattern in '%s'") % decode(p, pe));
   unsigned int depth = 1;
@@ -385,12 +386,12 @@ find_next_subpattern(string::const_itera
       case META_ALT_KET:
         depth--;
         if (depth == 0)
-          return p+1;
+          return p + 1;
         break;
 
       case META_ALT_OR:
         if (depth == 1 && want_alternatives)
-          return p+1;
+          return p + 1;
         break;
       }
 
@@ -405,7 +406,7 @@ do_match(string::const_iterator sb, stri
   unsigned int sc, pc;
   string::const_iterator s(sb);
 
-  L(FL("subpattern: '%s' against '%s'") % string(s,se) % decode(p,pe));
+  L(FL("subpattern: '%s' against '%s'") % string(s, se) % decode(p, pe));
 
   while (p < pe)
     {
@@ -414,11 +415,14 @@ do_match(string::const_iterator sb, stri
       pc = widen<unsigned int, char>(*p++);
       // sc will be the current string character
       // s will point to sc
-      if(s < se) {
-        sc = widen<unsigned int, char>(*s);
-      } else {
-        sc = 0;
-      }
+      if(s < se)
+        {
+          sc = widen<unsigned int, char>(*s);
+        }
+      else
+        {
+          sc = 0;
+        }
       switch (pc)
         {
         default:           // literal
@@ -432,23 +436,23 @@ do_match(string::const_iterator sb, stri
           break;
 
         case META_CC_BRA:  // any of these characters
-          {
-            bool matched = false;
-            I(p < pe);
-            I(*p != META_CC_KET);
-            do
-              {
-                if (widen<unsigned int, char>(*p) == sc)
-                  matched = true;
-                p++;
-                I(p < pe);
-              }
-            while (*p != META_CC_KET);
-            if (!matched)
-              return false;
-          }
-          p++;
-          break;
+        {
+          bool matched = false;
+          I(p < pe);
+          I(*p != META_CC_KET);
+          do
+            {
+              if (widen<unsigned int, char>(*p) == sc)
+                matched = true;
+              p++;
+              I(p < pe);
+            }
+          while (*p != META_CC_KET);
+          if (!matched)
+            return false;
+        }
+        p++;
+        break;
 
         case META_CC_INV_BRA:  // any but these characters
           I(p < pe);
@@ -500,37 +504,37 @@ do_match(string::const_iterator sb, stri
           return false;
 
         case META_ALT_BRA:
-          {
-            string::const_iterator prest, psub, pnext;
-            string::const_iterator srest;
+        {
+          string::const_iterator prest, psub, pnext;
+          string::const_iterator srest;
 
-            prest = find_next_subpattern(p, pe, false);
-            psub = p;
-            // [ psub ... prest ) is the current bracket pair
-            // (including the *closing* braket, but not the opening braket)
-            do
-              {
-                pnext = find_next_subpattern(psub, pe, true);
-                // pnext points just after a comma or the closing braket
-                // [ psub ... pnext ) is one branch with trailing delimiter
-                srest = (prest == pe ? se : s);
-                for (; srest < se; srest++)
-                  {
-                    if (do_match(s, srest, psub, pnext - 1)
-                        && do_match(srest, se, prest, pe))
-                      return true;
-                  }
-                // try the empty target too
-                if (do_match(s, srest, psub, pnext - 1)
-                    && do_match(srest, se, prest, pe))
-                  return true;
+          prest = find_next_subpattern(p, pe, false);
+          psub = p;
+          // [ psub ... prest ) is the current bracket pair
+          // (including the *closing* braket, but not the opening braket)
+          do
+            {
+              pnext = find_next_subpattern(psub, pe, true);
+              // pnext points just after a comma or the closing braket
+              // [ psub ... pnext ) is one branch with trailing delimiter
+              srest = (prest == pe ? se : s);
+              for (; srest < se; srest++)
+                {
+                  if (do_match(s, srest, psub, pnext - 1)
+                      && do_match(srest, se, prest, pe))
+                    return true;
+                }
+              // try the empty target too
+              if (do_match(s, srest, psub, pnext - 1)
+                  && do_match(srest, se, prest, pe))
+                return true;
 
-                psub = pnext;
-              }
-            while (pnext < prest);
-            return false;
-          }
+              psub = pnext;
+            }
+          while (pnext < prest);
+          return false;
         }
+        }
       if (s < se)
         {
           ++s;
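
[Illustrative aside, not part of the diff above.] The META_ALT_BRA case re-indented here handles a {a,b,...} alternation group by trying each branch against every possible split point of the remaining input and recursing on the tail. A minimal standalone sketch of that split-and-recurse idea, with the alternatives reduced to plain literals followed by a literal suffix (the real matcher works on an encoded pattern and also supports nesting and character classes, which this toy version does not), might look like this:

// Illustrative sketch only; function name and simplifications are made up
// for the example and are not part of the monotone sources.
#include <iostream>
#include <string>
#include <vector>

bool
match_alt_then_suffix(std::string const & s,
                      std::vector<std::string> const & alternatives,
                      std::string const & suffix)
{
  for (auto const & alt : alternatives)
    {
      // does this branch match as a prefix of the input?
      if (s.size() < alt.size() || s.compare(0, alt.size(), alt) != 0)
        continue;
      // if so, match the rest of the pattern against the rest of the input
      if (s.compare(alt.size(), std::string::npos, suffix) == 0)
        return true;
    }
  return false;
}

int main()
{
  // roughly "{foo,ba}r" against a few inputs
  std::vector<std::string> alts = {"foo", "ba"};
  std::cout << match_alt_then_suffix("bar", alts, "r") << '\n';   // 1
  std::cout << match_alt_then_suffix("foor", alts, "r") << '\n';  // 1
  std::cout << match_alt_then_suffix("bat", alts, "r") << '\n';   // 0
  return 0;
}
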
============================================================
--- src/hash_map.hh	10db5cb434769d726d1f354c7ac19c8d102152b9
+++ src/hash_map.hh	7304e09d7b6002f07b01285b996abd2cbf6f1f22
@@ -11,7 +11,8 @@
 #define __HASHMAP_HH
 
 #include <functional>
-namespace hashmap {
+namespace hashmap
+{
 
   template<typename T>
   class equal_to : public std::equal_to<T>
@@ -56,7 +57,8 @@ namespace hashmap {
 #include <tr1/unordered_map>
 #include <tr1/unordered_set>
 
-namespace hashmap {
+namespace hashmap
+{
   template<>
   struct hash<std::string>
   {
@@ -67,24 +69,24 @@ namespace hashmap {
   };
 
   template<typename _Key, typename _Value>
-  class hash_map : public std::tr1::unordered_map<_Key,
-                                                  _Value,
-                                                  hash<_Key>,
-                                                  equal_to<_Key> >
-  {};
+  class hash_map : public std::tr1::unordered_map < _Key,
+    _Value,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 
   template<typename _Key>
-  class hash_set : public std::tr1::unordered_set<_Key,
-                                                  hash<_Key>,
-                                                  equal_to<_Key> >
-  {};
+  class hash_set : public std::tr1::unordered_set < _Key,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 
   template<typename _Key, typename _Value>
-  class hash_multimap : public std::tr1::unordered_multimap<_Key,
-                                                            _Value,
-                                                            hash<_Key>,
-                                                            equal_to<_Key> >
-  {};
+  class hash_multimap : public std::tr1::unordered_multimap < _Key,
+    _Value,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 }
 
 #elif defined(HAVE_GNUCXX_HASHMAP)
@@ -92,7 +94,8 @@ namespace hashmap {
 #include <ext/hash_map>
 #include <ext/hash_set>
 
-namespace hashmap {
+namespace hashmap
+{
   template<>
   struct hash<std::string>
   {
@@ -103,24 +106,24 @@ namespace hashmap {
   };
 
   template<typename _Key, typename _Value>
-  class hash_map : public __gnu_cxx::hash_map<_Key,
-                                              _Value,
-                                              hash<_Key>,
-                                              equal_to<_Key> >
-  {};
+  class hash_map : public __gnu_cxx::hash_map < _Key,
+    _Value,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 
   template<typename _Key>
-  class hash_set : public __gnu_cxx::hash_set<_Key,
-                                              hash<_Key>,
-                                              equal_to<_Key> >
-  {};
+  class hash_set : public __gnu_cxx::hash_set < _Key,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 
   template<typename _Key, typename _Value>
-  class hash_multimap : public __gnu_cxx::hash_multimap<_Key,
-                                                        _Value,
-                                                        hash<_Key>,
-                                                        equal_to<_Key> >
-  {};
+  class hash_multimap : public __gnu_cxx::hash_multimap < _Key,
+    _Value,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 
 
 }
@@ -130,39 +133,40 @@ namespace hashmap {
 #include <hash_map>
 #include <hash_set>
 
-namespace hashmap {
+namespace hashmap
+{
   template<>
   struct hash<std::string>
   {
     size_t operator()(std::string const & s) const
     {
-      const char* s2=s.c_str();
+      const char * s2 = s.c_str();
       unsigned long h = 0;
       for ( ; *s2; ++s2)
-        h = 5*h + *s2;
+        h = 5 * h + *s2;
       return size_t(h);
     }
   };
 
   template<typename _Key, typename _Value>
-  class hash_map : public std::hash_map<_Key,
-                                        _Value,
-                                        hash<_Key>,
-                                        equal_to<_Key> >
-  {};
+  class hash_map : public std::hash_map < _Key,
+    _Value,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 
   template<typename _Key>
-  class hash_set : public std::hash_set<_Key,
-                                        hash<_Key>,
-                                        equal_to<_Key> >
-  {};
+  class hash_set : public std::hash_set < _Key,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 
   template<typename _Key, typename _Value>
-  class hash_multimap : public std::hash_multimap<_Key,
-                                                  _Value,
-                                                  hash<_Key>,
-                                                  equal_to<_Key> >
-  {};
+  class hash_multimap : public std::hash_multimap < _Key,
+    _Value,
+    hash<_Key>,
+    equal_to<_Key> >
+{};
 }
 
 #elif _MSC_VER
@@ -177,10 +181,10 @@ namespace hashmap
   {
     size_t operator()(std::string const & s) const
     {
-      const char* s2=s.c_str();
+      const char * s2 = s.c_str();
       unsigned long h = 0;
       for ( ; *s2; ++s2)
-        h = 5*h + *s2;
+        h = 5 * h + *s2;
       return size_t(h);
     }
   };
@@ -204,18 +208,18 @@ namespace hashmap
     }
   };
   template <typename _Key, typename _Value>
-  struct hash_map : public stdext::hash_map<_Key,
-                                            _Value,
-                                            hash_traits<_Key> >
+  struct hash_map : public stdext::hash_map < _Key,
+    _Value,
+    hash_traits<_Key> >
   {};
   template <typename _Key, typename _Value>
-  struct hash_multimap : public stdext::hash_multimap<_Key,
-                                                      _Value,
-                                                      hash_traits<_Key> >
+  struct hash_multimap : public stdext::hash_multimap < _Key,
+    _Value,
+    hash_traits<_Key> >
   {};
   template <typename _Key>
-  struct hash_set : public stdext::hash_set<_Key,
-                                            hash_traits<_Key> >
+  struct hash_set : public stdext::hash_set < _Key,
+    hash_traits<_Key> >
   {};
 }
 #endif
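
[Illustrative aside, not part of the diff above.] All of the fallback branches in hash_map.hh use the same multiplicative string hash, h = 5*h + c, behind a common hashmap::hash / equal_to interface. A standalone sketch of that hash plugged into std::unordered_map -- a modern stand-in for the TR1 / __gnu_cxx / stdext backends the header selects among, with the struct name legacy_string_hash made up for the example -- might look like this:

// Illustrative sketch only; not part of the monotone sources.
#include <cstddef>
#include <iostream>
#include <string>
#include <unordered_map>

struct legacy_string_hash
{
  std::size_t operator()(std::string const & s) const
  {
    unsigned long h = 0;
    // same recurrence as the fallback hash<std::string> above
    for (char const * p = s.c_str(); *p; ++p)
      h = 5 * h + *p;
    return static_cast<std::size_t>(h);
  }
};

int main()
{
  std::unordered_map<std::string, int, legacy_string_hash> m;
  m["src/hash_map.hh"] = 1;
  m["src/string_queue.hh"] = 2;
  std::cout << m["src/hash_map.hh"] << '\n';   // prints 1
  return 0;
}
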
============================================================
--- src/string_queue.hh	5053a58d5e02d505c19a02b2101dc5876d5bfbaa
+++ src/string_queue.hh	14f6e3629d400cd90d96bf435563bb3cd62f51d3
@@ -24,219 +24,219 @@ public:
 {
 public:
   string_queue (size_t default_size = 8192)
-    {
-      buf = new char[default_size];
-      front = back = buf;
-      end = buf + default_size;
-    }
+  {
+    buf = new char[default_size];
+    front = back = buf;
+    end = buf + default_size;
+  }
 
   ~string_queue ()
-    {
-      delete[]buf;
-    }
+  {
+    delete[]buf;
+  }
 
   void append (const std::string & v)
-    {
-      selfcheck ();
-      reserve_additional (v.size ());
-      simple_append (v);
-      if (do_selfcheck)
-        {
-          selfcheck_str.append (v);
-          selfcheck ();
-        }
-    };
+  {
+    selfcheck ();
+    reserve_additional (v.size ());
+    simple_append (v);
+    if (do_selfcheck)
+      {
+        selfcheck_str.append (v);
+        selfcheck ();
+      }
+  };
 
-  void append (const char *str, size_t bytes)
-    {
-      selfcheck ();
-      reserve_additional (bytes);
-      simple_append (str, bytes);
-      if (do_selfcheck)
-        {
-          selfcheck_str.append (std::string (str, bytes));
-          selfcheck ();
-        }
-    };
+  void append (const char * str, size_t bytes)
+  {
+    selfcheck ();
+    reserve_additional (bytes);
+    simple_append (str, bytes);
+    if (do_selfcheck)
+      {
+        selfcheck_str.append (std::string (str, bytes));
+        selfcheck ();
+      }
+  };
 
   void append (const char v)
-    {
-      selfcheck ();
-      if (available_size () >= 1)
-        {
-          *back = v;
-          ++back;
-        }
-      else
-        {
-          std::string tmp;
-          tmp += v;
-          I (tmp.size () == 1 && tmp[0] == v);
-          append (tmp);
-        }
-      if (do_selfcheck)
-        {
-          selfcheck_str += v;
-          selfcheck ();
-        }
-    }
+  {
+    selfcheck ();
+    if (available_size () >= 1)
+      {
+        *back = v;
+        ++back;
+      }
+    else
+      {
+        std::string tmp;
+        tmp += v;
+        I (tmp.size () == 1 && tmp[0] == v);
+        append (tmp);
+      }
+    if (do_selfcheck)
+      {
+        selfcheck_str += v;
+        selfcheck ();
+      }
+  }
 
   string_queue & operator+= (const char v)
-    {
-      append (v);
-      return *this;
-    }
+  {
+    append (v);
+    return *this;
+  }
 
   string_queue & operator+= (const std::string & v)
-    {
-      append (v);
-      return *this;
-    }
+  {
+    append (v);
+    return *this;
+  }
 
-  char &operator[] (size_t pos)
-    {
-      I (pos < used_size ());
-      return front[pos];
-    }
+  char & operator[] (size_t pos)
+  {
+    I (pos < used_size ());
+    return front[pos];
+  }
 
-  const char &operator[] (size_t pos) const
-    {
-      I (pos < used_size ());
-      return front[pos];
-    }
+  const char & operator[] (size_t pos) const
+  {
+    I (pos < used_size ());
+    return front[pos];
+  }
 
   void pop_front (size_t amount)
-    {
-      selfcheck ();
-      I (used_size () >= amount);
-      front += amount;
-      if (front == back)
-        {
-          front = back = buf;
-        }
-      if (used_size () * 3 < buffer_size () && buffer_size () > (1024 * 1024))
-        {
-          // don't bother shrinking unless it will help a lot, and
-          // we're using enough memory to care.
-          size_t a_new_size = (size_t) (used_size () * 1.1);    // leave some headroom
-          resize_buffer (std::max ((size_t) 8192, a_new_size));
-        }
-      if (do_selfcheck)
-        {
-          selfcheck_str.erase (0, amount);
-          selfcheck ();
-        }
-    }
+  {
+    selfcheck ();
+    I (used_size () >= amount);
+    front += amount;
+    if (front == back)
+      {
+        front = back = buf;
+      }
+    if (used_size () * 3 < buffer_size () && buffer_size () > (1024 * 1024))
+      {
+        // don't bother shrinking unless it will help a lot, and
+        // we're using enough memory to care.
+        size_t a_new_size = (size_t) (used_size () * 1.1);    // leave some headroom
+        resize_buffer (std::max ((size_t) 8192, a_new_size));
+      }
+    if (do_selfcheck)
+      {
+        selfcheck_str.erase (0, amount);
+        selfcheck ();
+      }
+  }
 
   std::string substr (size_t pos, size_t size) const
-    {
-      I (size <= max_string_queue_incr);
-      I (pos <= max_string_queue_size);
-      I (used_size () >= (pos + size));
-      return std::string (front + pos, size);
-    }
+  {
+    I (size <= max_string_queue_incr);
+    I (pos <= max_string_queue_size);
+    I (used_size () >= (pos + size));
+    return std::string (front + pos, size);
+  }
 
-  const char *front_pointer (size_t strsize) const
-    {
-      I (strsize <= max_string_queue_size);
-      I (used_size () >= strsize);
-      return front;
-    }
+  const char * front_pointer (size_t strsize) const
+  {
+    I (strsize <= max_string_queue_size);
+    I (used_size () >= strsize);
+    return front;
+  }
 
   size_t size () const
-    {
-      return used_size ();
-    }
+  {
+    return used_size ();
+  }
   size_t used_size () const
-    {
-      return (size_t) (back - front);
-    }
+  {
+    return (size_t) (back - front);
+  }
   size_t buffer_size () const
-    {
-      return (size_t) (end - buf);
-    }
+  {
+    return (size_t) (end - buf);
+  }
   size_t available_size () const
-    {
-      return (size_t) (end - back);
-    }
+  {
+    return (size_t) (end - back);
+  }
   bool empty () const
-    {
-      return front == back;
-    }
+  {
+    return front == back;
+  }
 
   void selfcheck ()
-    {
-      if (do_selfcheck)
-        {
-          I (buf <= front && front <= back && back <= end);
-          I (selfcheck_str.size () == used_size ()
-             && std::memcmp (selfcheck_str.data (), front, used_size ()) == 0);
-        }
-    }
+  {
+    if (do_selfcheck)
+      {
+        I (buf <= front && front <= back && back <= end);
+        I (selfcheck_str.size () == used_size ()
+           && std::memcmp (selfcheck_str.data (), front, used_size ()) == 0);
+      }
+  }
 
   void reserve_total (size_t amount)
-    {
-      if ((size_t) (end - front) >= amount)
-        {
-          return;
-        }
-      reserve_additional (amount - available_size ());
-    }
+  {
+    if ((size_t) (end - front) >= amount)
+      {
+        return;
+      }
+    reserve_additional (amount - available_size ());
+  }
 
   void reserve_additional (size_t amount)
-    {
-      I(amount <= max_string_queue_incr);
-      if (available_size () >= amount)
+  {
+    I(amount <= max_string_queue_incr);
+    if (available_size () >= amount)
+      return;
+    if (1.25 * (used_size () + amount) < buffer_size ())
+      {
+        // 1.25* to make sure that we don't do a lot of remove 1 byte from
+        // beginning, move entire array, append a byte, etc.
+        size_t save_used_size = used_size ();
+        std::memmove (buf, front, save_used_size);
+        front = buf;
+        back = front + save_used_size;
+        selfcheck ();
         return;
-      if (1.25 * (used_size () + amount) < buffer_size ())
-        {
-          // 1.25* to make sure that we don't do a lot of remove 1 byte from
-          // beginning, move entire array, append a byte, etc.
-          size_t save_used_size = used_size ();
-          std::memmove (buf, front, save_used_size);
-          front = buf;
-          back = front + save_used_size;
-          selfcheck ();
-          return;
-        }
-      // going to expand the buffer, increase by at least 1.25x so that
-      // we don't have a pathological case of reserving a little extra
-      // a whole bunch of times
-      size_t new_buffer_size =
-        std::max ((size_t) (1.25 * buffer_size ()),
-                  used_size () + amount);
-      resize_buffer (new_buffer_size);
-      selfcheck ();
-    }
+      }
+    // going to expand the buffer, increase by at least 1.25x so that
+    // we don't have a pathological case of reserving a little extra
+    // a whole bunch of times
+    size_t new_buffer_size =
+      std::max ((size_t) (1.25 * buffer_size ()),
+                used_size () + amount);
+    resize_buffer (new_buffer_size);
+    selfcheck ();
+  }
 
 protected:
   void simple_append (const std::string & v)
-    {
-      I ((size_t) (end - back) >= v.size ());
-      I (v.size() <= max_string_queue_incr);
-      std::memcpy (back, v.data (), v.size ());
-      back += v.size ();
-    }
+  {
+    I ((size_t) (end - back) >= v.size ());
+    I (v.size() <= max_string_queue_incr);
+    std::memcpy (back, v.data (), v.size ());
+    back += v.size ();
+  }
 
-  void simple_append (const char *str, size_t bytes)
-    {
-      I ((size_t) (end - back) >= bytes);
-      I (bytes <= max_string_queue_incr);
-      std::memcpy (back, str, bytes);
-      back += bytes;
-    }
+  void simple_append (const char * str, size_t bytes)
+  {
+    I ((size_t) (end - back) >= bytes);
+    I (bytes <= max_string_queue_incr);
+    std::memcpy (back, str, bytes);
+    back += bytes;
+  }
 
   void resize_buffer (size_t new_buffer_size)
-    {
-      I (new_buffer_size <= max_string_queue_size);
-      size_t save_used_size = used_size ();
-      char *newbuf = new char[new_buffer_size];
-      std::memcpy (newbuf, front, save_used_size);
-      delete[]buf;
-      buf = front = newbuf;
-      back = front + save_used_size;
-      end = buf + new_buffer_size;
-    }
+  {
+    I (new_buffer_size <= max_string_queue_size);
+    size_t save_used_size = used_size ();
+    char * newbuf = new char[new_buffer_size];
+    std::memcpy (newbuf, front, save_used_size);
+    delete[]buf;
+    buf = front = newbuf;
+    back = front + save_used_size;
+    end = buf + new_buffer_size;
+  }
 
 private:
   static const bool do_selfcheck = false;
@@ -244,10 +244,10 @@ private:
   static const size_t max_string_queue_size = 500 * 1024 * 1024;
   static const size_t max_string_queue_incr = 500 * 1024 * 1024;
   string_queue (string_queue & from)
-    {
-      std::abort ();
-    }
-  char *buf, *front, *back, *end;
+  {
+    std::abort ();
+  }
+  char * buf, *front, *back, *end;
   std::string selfcheck_str;
 };
 
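[Illustrative aside, not part of the diff above.] The reserve_additional() hunks show the buffer policy behind string_queue: if the new bytes fit at the back, do nothing; if 1.25*(used + amount) still fits in the current buffer, compact by sliding the live bytes to the front; otherwise grow to at least 1.25x the old capacity (and pop_front() only shrinks when used*3 < capacity and the buffer exceeds 1 MiB). A minimal sketch of just that capacity decision, with the helper name plan_reserve made up for the example (the real class also moves the bytes and runs selfcheck()), might look like this:

// Illustrative sketch only; not part of the monotone sources.
#include <algorithm>
#include <cassert>
#include <cstddef>

enum class queue_action { none, compact, grow };

// Decide what reserve_additional(amount) would do for a queue holding `used`
// bytes in a buffer of `capacity` bytes with `avail` bytes free at the back.
queue_action
plan_reserve(std::size_t used, std::size_t capacity,
             std::size_t avail, std::size_t amount,
             std::size_t & new_capacity)
{
  new_capacity = capacity;
  if (avail >= amount)
    return queue_action::none;        // the new bytes already fit at the back
  if (1.25 * (used + amount) < capacity)
    return queue_action::compact;     // slide the live bytes to the front
  // otherwise grow by at least 1.25x so repeated small reserves stay amortized
  new_capacity = std::max(static_cast<std::size_t>(1.25 * capacity),
                          used + amount);
  return queue_action::grow;
}

int main()
{
  std::size_t cap = 0;
  assert(plan_reserve(100, 8192, 8000, 64, cap) == queue_action::none);
  assert(plan_reserve(100, 8192, 10, 64, cap) == queue_action::compact);
  assert(plan_reserve(8000, 8192, 100, 4000, cap) == queue_action::grow);
  assert(cap == 12000);
  return 0;
}
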
============================================================
--- src/paths.cc	f4244d2b15de4c1b8005fa8b244fe8391ce61d1b
+++ src/paths.cc	971f5fff7b22a3b8574036742e66fdad28295f1e
@@ -132,7 +132,7 @@ has_bad_chars(string const & path)
     {
       // char is often a signed type; convert to unsigned to ensure that
       // bytes 0x80-0xff are considered > 0x1f.
-      u8 x = (u8)*c;
+      u8 x = (u8) * c;
       // 0x5c is '\\'; we use the hex constant to make the dependency on
       // ASCII encoding explicit.
       if (UNLIKELY(x <= 0x1f || x == 0x5c || x == 0x7f))
@@ -149,7 +149,7 @@ has_bad_component_chars(string const & p
     {
       // char is often a signed type; convert to unsigned to ensure that
       // bytes 0x80-0xff are considered > 0x1f.
-      u8 x = (u8)*c;
+      u8 x = (u8) * c;
       // 0x2f is '/' and 0x5c is '\\'; we use hex constants to make the
       // dependency on ASCII encoding explicit.
       if (UNLIKELY(x <= 0x1f || x == 0x2f || x == 0x5c || x == 0x7f))
@@ -524,7 +524,7 @@ any_path::basename() const
   string const & s = data;
   string::size_type sep = s.rfind('/');
 #ifdef WIN32
-  if (sep == string::npos && s.size()>= 2 && s[1] == ':')
+  if (sep == string::npos && s.size() >= 2 && s[1] == ':')
     sep = 1;
 #endif
   if (sep == string::npos)
@@ -542,7 +542,7 @@ any_path::dirname() const
   string const & s = data;
   string::size_type sep = s.rfind('/');
 #ifdef WIN32
-  if (sep == string::npos && s.size()>= 2 && s[1] == ':')
+  if (sep == string::npos && s.size() >= 2 && s[1] == ':')
     sep = 1;
 #endif
   if (sep == string::npos)
@@ -557,8 +557,8 @@ any_path::dirname() const
 #ifdef WIN32
       || (sep == 1 || (sep == 2 && s[1] == ':'))
 #endif
-      )
-    return any_path(s, 0, sep+1);
+     )
+    return any_path(s, 0, sep + 1);
 
   return any_path(s, 0, sep);
 }
@@ -581,7 +581,7 @@ system_path::dirname() const
   string const & s = data;
   string::size_type sep = s.rfind('/');
 #ifdef WIN32
-  if (sep == string::npos && s.size()>= 2 && s[1] == ':')
+  if (sep == string::npos && s.size() >= 2 && s[1] == ':')
     sep = 1;
 #endif
   I(sep != string::npos);
@@ -595,8 +595,8 @@ system_path::dirname() const
 #ifdef WIN32
       || (sep == 1 || (sep == 2 && s[1] == ':'))
 #endif
-      )
-    return system_path(s, 0, sep+1);
+     )
+    return system_path(s, 0, sep + 1);
 
   return system_path(s, 0, sep);
 }
@@ -976,7 +976,7 @@ find_bookdir(system_path const & root, p
     }
   return false;
 
- found:
+found:
   // check for _MTN/. and _MTN/.. to see if mt dir is readable
   try
     {
@@ -1022,7 +1022,7 @@ find_and_go_to_workspace(string const & 
         }
       else if (cur_str.size() > 1 && cur_str[1] == ':')
         {
-          root = system_path(cur_str.substr(0,2) + "/", origin::system);
+          root = system_path(cur_str.substr(0, 2) + "/", origin::system);
         }
       else I(false);
 #else
@@ -1035,8 +1035,8 @@ find_and_go_to_workspace(string const & 
       L(FL("limiting search for workspace to %s") % root);
 
       require_path_is_directory(root,
-                               F("search root '%s' does not exist") % root,
-                               F("search root '%s' is not a directory") % root);
+                                F("search root '%s' does not exist") % root,
+                                F("search root '%s' is not a directory") % root);
     }
 
   // first look for the current name of the bookkeeping directory.
============================================================
--- src/paths.hh	2939f11db21e23d68530ed7415b9bdfa8dfd5a20
+++ src/paths.hh	e0017f5b8e0ee0185f58ec2a41e3770413a2de51
@@ -417,7 +417,7 @@ template <class T>
                                            char const * s, bool isdir);
 
 template <class T>
-bool safe_compose(T const & p, char const * s, T & result, bool isdir=false)
+bool safe_compose(T const & p, char const * s, T & result, bool isdir = false)
 {
   try
     {
============================================================
--- src/key_store.cc	64c4c4bed1fadc26b51207b9f61343f9dd7d3c6e
+++ src/key_store.cc	9b092fc3342d8d9e8a31842712119bd0dabddf4e
@@ -435,12 +435,12 @@ struct key_delete_validator : public pac
   virtual void consume_key_pair(key_name const & name,
                                 keypair const & kp)
   {
-     L(FL("reading key pair '%s' from key store for validation") % name);
-     key_id ident;
-     key_hash_code(name, kp.pub, ident);
-     E(ident == expected_ident, origin::user,
-       F("expected key with id %s in key file '%s', got key with id %s")
-         % expected_ident % file % ident);
+    L(FL("reading key pair '%s' from key store for validation") % name);
+    key_id ident;
+    key_hash_code(name, kp.pub, ident);
+    E(ident == expected_ident, origin::user,
+      F("expected key with id %s in key file '%s', got key with id %s")
+      % expected_ident % file % ident);
   }
   virtual void consume_old_private_key(key_name const & ident,
                                        old_arc4_rsa_priv_key const & k)
@@ -457,20 +457,20 @@ key_store::delete_key(key_id const & ide
       system_path file;
       s->get_key_file(ident, i->second.first, file);
       if (!file_exists(file))
-          s->get_old_key_file(i->second.first, file);
+        s->get_old_key_file(i->second.first, file);
 
       // sanity: if we read the key originally from a file which did not
       // follow the NAME.IDENT scheme and have another key pair with NAME
       // in the key dir, we could accidentially drop the wrong private key
       // here, so validate if the file really contains the key with the
       // ID we want to delete, before going mad
-        {
-          key_delete_validator val(ident, file);
-          data dat;
-          read_data(file, dat);
-          istringstream is(dat());
-          I(read_packets(is, val));
-        }
+      {
+        key_delete_validator val(ident, file);
+        data dat;
+        read_data(file, dat);
+        istringstream is(dat());
+        I(read_packets(is, val));
+      }
 
       delete_file(file);
 
@@ -557,7 +557,7 @@ key_store_state::decrypt_private_key(key
 {
   // See if we have this key in the decrypted key cache.
   map<key_id, shared_ptr<RSA_PrivateKey> >::const_iterator
-    cpk = privkey_cache.find(id);
+  cpk = privkey_cache.find(id);
   if (cpk != privkey_cache.end())
     return cpk->second;
 
@@ -723,7 +723,7 @@ key_store::create_key_pair(database & db
 
   // convert to storage format
   L(FL("generated %d-byte public key\n"
-      "generated %d-byte (encrypted) private key\n")
+       "generated %d-byte (encrypted) private key\n")
     % kp.pub().size()
     % kp.priv().size());
 
@@ -814,12 +814,12 @@ key_store::decrypt_rsa(key_id const & id
       shared_ptr<RSA_PrivateKey> priv_key = s->decrypt_private_key(id);
 
       shared_ptr<PK_Decryptor>
-        decryptor(get_pk_decryptor(*priv_key, "EME1(SHA-1)"));
+      decryptor(get_pk_decryptor(*priv_key, "EME1(SHA-1)"));
 
       SecureVector<Botan::byte> plain =
         decryptor->decrypt(reinterpret_cast<Botan::byte const *>(ciphertext().data()),
                            ciphertext().size());
-      plaintext = string(reinterpret_cast<char const*>(plain.begin()),
+      plaintext = string(reinterpret_cast<char const *>(plain.begin()),
                          plain.size());
     }
   catch (Botan::Exception & ex)
@@ -854,22 +854,23 @@ key_store::make_signature(database & db,
       || s->ssh_sign_mode == "check"
       || s->ssh_sign_mode == "only")
     {
-      if (agent.connected()) {
-        //grab the monotone public key as an RSA_PublicKey
-        SecureVector<Botan::byte> pub_block;
-        pub_block.set(reinterpret_cast<Botan::byte const *>(key.pub().data()),
-                      key.pub().size());
-        L(FL("make_signature: building %d-byte pub key") % pub_block.size());
-        shared_ptr<X509_PublicKey> x509_key =
-          shared_ptr<X509_PublicKey>(Botan::X509::load_key(pub_block));
-        shared_ptr<RSA_PublicKey> pub_key = shared_dynamic_cast<RSA_PublicKey>(x509_key);
+      if (agent.connected())
+        {
+          //grab the monotone public key as an RSA_PublicKey
+          SecureVector<Botan::byte> pub_block;
+          pub_block.set(reinterpret_cast<Botan::byte const *>(key.pub().data()),
+                        key.pub().size());
+          L(FL("make_signature: building %d-byte pub key") % pub_block.size());
+          shared_ptr<X509_PublicKey> x509_key =
+            shared_ptr<X509_PublicKey>(Botan::X509::load_key(pub_block));
+          shared_ptr<RSA_PublicKey> pub_key = shared_dynamic_cast<RSA_PublicKey>(x509_key);
 
-        if (!pub_key)
-          throw recoverable_failure(origin::system,
-                                    "Failed to get monotone RSA public key");
+          if (!pub_key)
+            throw recoverable_failure(origin::system,
+                                      "Failed to get monotone RSA public key");
 
-        agent.sign_data(*pub_key, tosign, sig_string);
-      }
+          agent.sign_data(*pub_key, tosign, sig_string);
+        }
       if (sig_string.length() <= 0)
         L(FL("make_signature: monotone and ssh-agent keys do not match, will"
              " use monotone signing"));
@@ -904,10 +905,11 @@ key_store::make_signature(database & db,
           priv_key = s->decrypt_private_key(id);
           if (agent.connected()
               && s->ssh_sign_mode != "only"
-              && s->ssh_sign_mode != "no") {
-            L(FL("make_signature: adding private key (%s) to ssh-agent") % id);
-            agent.add_identity(*priv_key, name());
-          }
+              && s->ssh_sign_mode != "no")
+            {
+              L(FL("make_signature: adding private key (%s) to ssh-agent") % id);
+              agent.add_identity(*priv_key, name());
+            }
           signer = shared_ptr<PK_Signer>(get_pk_signer(*priv_key, "EMSA3(SHA-1)"));
 
           /* If persist_phrase is true, the RSA_PrivateKey object is
@@ -919,14 +921,14 @@ key_store::make_signature(database & db,
 
 #if BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,7,7)
       sig = signer->sign_message(
-        reinterpret_cast<Botan::byte const *>(tosign.data()),
-        tosign.size(), lazy_rng::get());
+              reinterpret_cast<Botan::byte const *>(tosign.data()),
+              tosign.size(), lazy_rng::get());
 #else
       sig = signer->sign_message(
-        reinterpret_cast<Botan::byte const *>(tosign.data()),
-        tosign.size());
+              reinterpret_cast<Botan::byte const *>(tosign.data()),
+              tosign.size());
 #endif
-      sig_string = string(reinterpret_cast<char const*>(sig.begin()), sig.size());
+      sig_string = string(reinterpret_cast<char const *>(sig.begin()), sig.size());
     }
 
   if (s->ssh_sign_mode == "check" && ssh_sig.length() > 0)
@@ -1008,9 +1010,9 @@ key_store_state::migrate_old_key_pair
 
 void
 key_store_state::migrate_old_key_pair
-    (key_name const & id,
-     old_arc4_rsa_priv_key const & old_priv,
-     rsa_pub_key const & pub)
+(key_name const & id,
+ old_arc4_rsa_priv_key const & old_priv,
+ rsa_pub_key const & pub)
 {
   keypair kp;
   SecureVector<Botan::byte> arc4_key;
@@ -1103,9 +1105,9 @@ key_store::migrate_old_key_pair
 
 void
 key_store::migrate_old_key_pair
-    (key_name const & id,
-     old_arc4_rsa_priv_key const & old_priv,
-     rsa_pub_key const & pub)
+(key_name const & id,
+ old_arc4_rsa_priv_key const & old_priv,
+ rsa_pub_key const & pub)
 {
   s->migrate_old_key_pair(id, old_priv, pub);
 }
============================================================
--- src/key_store.hh	82ae903386eb882a602f5938ef5e5d9284187600
+++ src/key_store.hh	c0b0f30cfbbbb484dbbc37fdd1480ed2c50fa829
@@ -35,7 +35,7 @@ struct keypair
   {}
   keypair(rsa_pub_key const & a,
           rsa_priv_key const & b)
-   : pub(a), priv(b)
+    : pub(a), priv(b)
   {}
 };
 
============================================================
--- src/cset.cc	cc01fceb3ba2c16bb6079ab5f7a93f0d84f8ca85
+++ src/cset.cc	66a5d84e01a9327ea925530be133bb26745ac871
@@ -39,7 +39,7 @@ check_normalized(cset const & cs)
   {
     map<file_path, file_id>::const_iterator a = cs.files_added.begin();
     map<file_path, pair<file_id, file_id> >::const_iterator
-      d = cs.deltas_applied.begin();
+    d = cs.deltas_applied.begin();
     while (a != cs.files_added.end() && d != cs.deltas_applied.end())
       {
         // SPEEDUP?: should this use lower_bound instead of ++?  it should
@@ -58,7 +58,7 @@ check_normalized(cset const & cs)
   {
     set<pair<file_path, attr_key> >::const_iterator c = cs.attrs_cleared.begin();
     map<pair<file_path, attr_key>, attr_value>::const_iterator
-      s = cs.attrs_set.begin();
+    s = cs.attrs_set.begin();
     while (c != cs.attrs_cleared.end() && s != cs.attrs_set.end())
       {
         if (*c < s->first)
@@ -97,7 +97,7 @@ struct
 }
 
 struct
-detach
+  detach
 {
   detach(file_path const & src)
     : src_path(src),
@@ -125,7 +125,7 @@ struct
 };
 
 struct
-attach
+  attach
 {
   attach(node_id n,
          file_path const & p)
============================================================
--- src/cset.hh	90ea3f1a13761e0b3bd3ec3c894f7b91feb4a4c6
+++ src/cset.hh	fc616593ea10593fff39cd9c2cd88627d24b919b
@@ -69,13 +69,13 @@ struct cset
   bool operator==(cset const & other) const
   {
     return nodes_deleted == other.nodes_deleted
-      && dirs_added == other.dirs_added
-      && files_added == other.files_added
-      && nodes_renamed == other.nodes_renamed
-      && deltas_applied == other.deltas_applied
-      && attrs_cleared == other.attrs_cleared
-      && attrs_set == other.attrs_set
-      ;
+           && dirs_added == other.dirs_added
+           && files_added == other.files_added
+           && nodes_renamed == other.nodes_renamed
+           && deltas_applied == other.deltas_applied
+           && attrs_cleared == other.attrs_cleared
+           && attrs_set == other.attrs_set
+           ;
   }
 
   void apply_to(editable_tree & t) const;
============================================================
--- src/roster.cc	b4cec49faa1928388c7ab0ae1e2f389b202270b0
+++ src/roster.cc	3f5e134f840ac157909419f156b9e24ebbb5d10a
@@ -98,7 +98,7 @@ dump(marking_t const & marking, string &
   oss << "file_content: " << tmp << '\n';
   oss << "attrs (number: " << marking->attrs.size() << "):\n";
   for (map<attr_key, set<revision_id> >::const_iterator
-         i = marking->attrs.begin(); i != marking->attrs.end(); ++i)
+       i = marking->attrs.begin(); i != marking->attrs.end(); ++i)
     {
       dump(i->second, tmp);
       oss << "  " << i->first << ": " << tmp << '\n';
@@ -482,7 +482,7 @@ dfs_iter::advance_top()
       if (stack_top.second != stack_top.first->children.end())
         nextsize = stack_top.second->first().size();
 
-      int tmpsize = curr_path.size()-prevsize;
+      int tmpsize = curr_path.size() - prevsize;
       I(tmpsize >= 0);
       curr_path.resize(tmpsize);
       if (nextsize != 0)
@@ -534,7 +534,7 @@ dfs_iter::operator++()
         {
           if (track_path)
             {
-              curr_path.resize(curr_path.size()-1);
+              curr_path.resize(curr_path.size() - 1);
             }
           advance_top();
         }
@@ -572,7 +572,7 @@ shallow_equal(const_node_t a, const_node
   if (a->attrs != b->attrs)
     return false;
 
-  if (! same_type(a,b))
+  if (! same_type(a, b))
     return false;
 
   if (is_file_t(a))
@@ -596,8 +596,8 @@ shallow_equal(const_node_t a, const_node
             return false;
 
           dir_map::const_iterator
-            i = da->children.begin(),
-            j = db->children.begin();
+          i = da->children.begin(),
+          j = db->children.begin();
 
           while (i != da->children.end() && j != db->children.end())
             {
@@ -787,7 +787,7 @@ roster_t::get_node(node_id nid) const
 const_node_t
 roster_t::get_node(node_id nid) const
 {
-  node_t const &n(nodes.get_if_present(nid));
+  node_t const & n(nodes.get_if_present(nid));
   I(n);
   return n;
 }
@@ -1050,7 +1050,7 @@ roster_t::attach_node(node_id nid, node_
   // this iterator might point to old_locations.end(), because old_locations
   // only includes entries for renames, not new nodes
   map<node_id, pair<node_id, path_component> >::iterator
-    i = old_locations.find(nid);
+  i = old_locations.find(nid);
 
   if (null_node(parent) || name.empty())
     {
@@ -1199,7 +1199,7 @@ roster_t::check_sane(bool temp_nodes_ok)
   bool is_first = true;
   for (dfs_iter i(root_dir); !i.finished(); ++i)
     {
-      const_node_t const &n(*i);
+      const_node_t const & n(*i);
       if (is_first)
         {
           I(n->name.empty() && null_node(n->parent));
@@ -1280,9 +1280,9 @@ temp_node_id_source::next()
 node_id
 temp_node_id_source::next()
 {
-    node_id n = curr++;
-    I(temp_node(n));
-    return n;
+  node_id n = curr++;
+  I(temp_node(n));
+  return n;
 }
 
 editable_roster_base::editable_roster_base(roster_t & r, node_id_source & nis)
@@ -1708,7 +1708,7 @@ namespace
                            new_marking->file_content);
 
     for (attr_map_t::const_iterator i = n->attrs.begin();
-           i != n->attrs.end(); ++i)
+         i != n->attrs.end(); ++i)
       {
         set<revision_id> & new_marks = new_marking->attrs[i->first];
         I(new_marks.empty());
@@ -1740,7 +1740,7 @@ namespace
     if (same_nodes)
       {
         bool same_markings = left_marking == right_marking
-          || *left_marking == *right_marking;
+                             || *left_marking == *right_marking;
         if (same_markings)
           {
             // The child marking will be the same as both parent markings,
@@ -1895,8 +1895,8 @@ mark_merge_roster(roster_t const & left_
        i != merge.all_nodes().end(); ++i)
     {
       node_t const & n = i->second;
-      node_t const &left_node = left_roster.all_nodes().get_if_present(i->first);
-      node_t const &right_node = right_roster.all_nodes().get_if_present(i->first);
+      node_t const & left_node = left_roster.all_nodes().get_if_present(i->first);
+      node_t const & right_node = right_roster.all_nodes().get_if_present(i->first);
 
       bool exists_in_left = (left_node);
       bool exists_in_right = (right_node);
@@ -1935,7 +1935,8 @@ mark_merge_roster(roster_t const & left_
   drop_extra_markings(merge, new_markings);
 }
 
-namespace {
+namespace
+{
 
   class editable_roster_for_nonmerge
     : public editable_roster_base
@@ -2246,7 +2247,7 @@ namespace
           {
             safe_insert(cs.deltas_applied,
                         make_pair(to_p, make_pair(from_f->content,
-                                                   to_f->content)));
+                                                  to_f->content)));
           }
       }
 
@@ -2417,9 +2418,9 @@ make_restricted_roster(roster_t const & 
       map<node_id, node_t>::const_iterator n = selected.begin();
 
       L(FL("selected node %d %s parent %d")
-            % n->second->self
-            % n->second->name
-            % n->second->parent);
+        % n->second->self
+        % n->second->name
+        % n->second->parent);
 
       bool missing_parent = false;
 
@@ -2434,7 +2435,7 @@ make_restricted_roster(roster_t const & 
             % n->second->parent);
 
           map<node_id, node_t>::const_iterator
-            p = selected.find(n->second->parent);
+          p = selected.find(n->second->parent);
 
           if (p != selected.end())
             {
@@ -2510,10 +2511,10 @@ make_restricted_roster(roster_t const & 
   // sane.
 
   if (!restricted.all_nodes().empty() && !restricted.has_root())
-   {
-     W(F("restriction excludes addition of root directory"));
-     problems++;
-   }
+    {
+      W(F("restriction excludes addition of root directory"));
+      problems++;
+    }
 
   E(problems == 0, origin::user, F("invalid restriction"));
 
============================================================
--- src/roster.hh	2b498673424f4d69f74cdb1486b9f8b051766e34
+++ src/roster.hh	a57935b6e7ff282dde8f4e8adc3651979153c5b2
@@ -178,9 +178,9 @@ struct marking
   bool operator==(marking const & other) const
   {
     return birth_revision == other.birth_revision
-      && parent_name == other.parent_name
-      && file_content == other.file_content
-      && attrs == other.attrs;
+           && parent_name == other.parent_name
+           && file_content == other.file_content
+           && attrs == other.attrs;
   }
 };
 
@@ -293,11 +293,11 @@ public:
 
   friend bool equal_shapes(roster_t const & a, roster_t const & b);
 
-  void check_sane(bool temp_nodes_ok=false) const;
+  void check_sane(bool temp_nodes_ok = false) const;
 
   // verify that this roster is sane, and corresponds to the given
   // marking map
-  void check_sane_against(marking_map const & marks, bool temp_nodes_ok=false) const;
+  void check_sane_against(marking_map const & marks, bool temp_nodes_ok = false) const;
 
   void print_to(data & dat,
                 marking_map const & mm,
============================================================
--- src/parallel_iter.hh	4901d0d1b3daece3f0b292c4343de8634814522e
+++ src/parallel_iter.hh	5d108bc8dfcbf26711e06c39fa4e09a9a83a4547
@@ -97,7 +97,7 @@ namespace parallel
             {
               I(left_->first == right_->first);
               state_ = in_both;
-          }
+            }
         }
       return !finished_;
     }
============================================================
--- src/merge_roster.cc	98297d6264f77d540fc8e1578b1ebc5b2f36ec38
+++ src/merge_roster.cc	50bbb499093aad43247d7a8cb5ca0b4202ce4ed7
@@ -158,7 +158,7 @@ roster_merge_result::is_clean() const
 roster_merge_result::is_clean() const
 {
   return !has_non_content_conflicts()
-    && !has_content_conflicts();
+         && !has_content_conflicts();
 }
 
 bool
@@ -171,30 +171,30 @@ roster_merge_result::has_non_content_con
 roster_merge_result::has_non_content_conflicts() const
 {
   return missing_root_conflict
-    || !invalid_name_conflicts.empty()
-    || !directory_loop_conflicts.empty()
-    || !orphaned_node_conflicts.empty()
-    || !multiple_name_conflicts.empty()
-    || !duplicate_name_conflicts.empty()
-    || !attribute_conflicts.empty();
+         || !invalid_name_conflicts.empty()
+         || !directory_loop_conflicts.empty()
+         || !orphaned_node_conflicts.empty()
+         || !multiple_name_conflicts.empty()
+         || !duplicate_name_conflicts.empty()
+         || !attribute_conflicts.empty();
 }
 
 int
 roster_merge_result::count_supported_resolution() const
 {
   return orphaned_node_conflicts.size()
-    + file_content_conflicts.size()
-    + duplicate_name_conflicts.size();
+         + file_content_conflicts.size()
+         + duplicate_name_conflicts.size();
 }
 
 int
 roster_merge_result::count_unsupported_resolution() const
 {
   return (missing_root_conflict ? 1 : 0)
-    + invalid_name_conflicts.size()
-    + directory_loop_conflicts.size()
-    + multiple_name_conflicts.size()
-    + attribute_conflicts.size();
+         + invalid_name_conflicts.size()
+         + directory_loop_conflicts.size()
+         + multiple_name_conflicts.size()
+         + attribute_conflicts.size();
 }
 
 static void
@@ -202,7 +202,7 @@ dump_conflicts(roster_merge_result const
 {
   if (result.missing_root_conflict)
     out += (FL("missing_root_conflict: root directory has been removed\n"))
-      .str();
+           .str();
 
   dump(result.invalid_name_conflicts, out);
   dump(result.directory_loop_conflicts, out);
@@ -547,115 +547,115 @@ roster_merge(roster_t const & left_paren
             I(false);
 
           case parallel::in_left:
-            {
-              node_t const & left_n = i.left_data();
-              // we skip nodes that aren't in the result roster (were
-              // deleted in the lifecycles step above)
-              if (result.roster.has_node(left_n->self))
-                {
-                  // attach this node from the left roster. this may cause
-                  // a name collision with the previously attached node from
-                  // the other side of the merge.
-                  copy_node_forward(result, new_i->second, left_n, left_side);
-                  ++new_i;
-                }
-              ++left_mi;
-              break;
-            }
+          {
+            node_t const & left_n = i.left_data();
+            // we skip nodes that aren't in the result roster (were
+            // deleted in the lifecycles step above)
+            if (result.roster.has_node(left_n->self))
+              {
+                // attach this node from the left roster. this may cause
+                // a name collision with the previously attached node from
+                // the other side of the merge.
+                copy_node_forward(result, new_i->second, left_n, left_side);
+                ++new_i;
+              }
+            ++left_mi;
+            break;
+          }
 
           case parallel::in_right:
+          {
+            node_t const & right_n = i.right_data();
+            // we skip nodes that aren't in the result roster
+            if (result.roster.has_node(right_n->self))
+              {
+                // attach this node from the right roster. this may cause
+                // a name collision with the previously attached node from
+                // the other side of the merge.
+                copy_node_forward(result, new_i->second, right_n, right_side);
+                ++new_i;
+              }
+            ++right_mi;
+            break;
+          }
+
+          case parallel::in_both:
+          {
+            I(new_i->first == i.left_key());
+            I(left_mi->first == i.left_key());
+            I(right_mi->first == i.right_key());
+            node_t const & left_n = i.left_data();
+            marking_t const & left_marking = left_mi->second;
+            node_t const & right_n = i.right_data();
+            marking_t const & right_marking = right_mi->second;
+            node_t const & new_n = new_i->second;
+            // merge name
             {
-              node_t const & right_n = i.right_data();
-              // we skip nodes that aren't in the result roster
-              if (result.roster.has_node(right_n->self))
+              pair<node_id, path_component> left_name, right_name, new_name;
+              multiple_name_conflict conflict(new_n->self);
+              left_name = make_pair(left_n->parent, left_n->name);
+              right_name = make_pair(right_n->parent, right_n->name);
+              if (merge_scalar(left_name,
+                               left_marking->parent_name,
+                               left_uncommon_ancestors,
+                               right_name,
+                               right_marking->parent_name,
+                               right_uncommon_ancestors,
+                               new_name, conflict))
                 {
-                  // attach this node from the right roster. this may cause
-                  // a name collision with the previously attached node from
-                  // the other side of the merge.
-                  copy_node_forward(result, new_i->second, right_n, right_side);
-                  ++new_i;
+                  side_t winning_side;
+
+                  if (new_name == left_name)
+                    winning_side = left_side;
+                  else if (new_name == right_name)
+                    winning_side = right_side;
+                  else
+                    I(false);
+
+                  // attach this node from the winning side of the merge. if
+                  // there is a name collision the previously attached node
+                  // (which is blocking this one) must come from the other
+                  // side of the merge.
+                  assign_name(result, new_n->self,
+                              new_name.first, new_name.second, winning_side);
+
                 }
-              ++right_mi;
-              break;
+              else
+                {
+                  // unsuccessful merge; leave node detached and save
+                  // conflict object
+                  result.multiple_name_conflicts.push_back(conflict);
+                }
             }
-
-          case parallel::in_both:
-            {
-              I(new_i->first == i.left_key());
-              I(left_mi->first == i.left_key());
-              I(right_mi->first == i.right_key());
-              node_t const & left_n = i.left_data();
-              marking_t const & left_marking = left_mi->second;
-              node_t const & right_n = i.right_data();
-              marking_t const & right_marking = right_mi->second;
-              node_t const & new_n = new_i->second;
-              // merge name
+            // if a file, merge content
+            if (is_file_t(new_n))
               {
-                pair<node_id, path_component> left_name, right_name, new_name;
-                multiple_name_conflict conflict(new_n->self);
-                left_name = make_pair(left_n->parent, left_n->name);
-                right_name = make_pair(right_n->parent, right_n->name);
-                if (merge_scalar(left_name,
-                                 left_marking->parent_name,
+                file_content_conflict conflict(new_n->self);
+                if (merge_scalar(downcast_to_file_t(left_n)->content,
+                                 left_marking->file_content,
                                  left_uncommon_ancestors,
-                                 right_name,
-                                 right_marking->parent_name,
+                                 downcast_to_file_t(right_n)->content,
+                                 right_marking->file_content,
                                  right_uncommon_ancestors,
-                                 new_name, conflict))
+                                 downcast_to_file_t(new_n)->content,
+                                 conflict))
                   {
-                    side_t winning_side;
-
-                    if (new_name == left_name)
-                      winning_side = left_side;
-                    else if (new_name == right_name)
-                      winning_side = right_side;
-                    else
-                      I(false);
-
-                    // attach this node from the winning side of the merge. if
-                    // there is a name collision the previously attached node
-                    // (which is blocking this one) must come from the other
-                    // side of the merge.
-                    assign_name(result, new_n->self,
-                                new_name.first, new_name.second, winning_side);
-
+                    // successful merge
                   }
                 else
                   {
-                    // unsuccessful merge; leave node detached and save
-                    // conflict object
-                    result.multiple_name_conflicts.push_back(conflict);
+                    downcast_to_file_t(new_n)->content = file_id();
+                    result.file_content_conflicts.push_back(conflict);
                   }
               }
-              // if a file, merge content
-              if (is_file_t(new_n))
+            // merge attributes
+            {
+              attr_map_t::const_iterator left_ai = left_n->attrs.begin();
+              attr_map_t::const_iterator right_ai = right_n->attrs.begin();
+              parallel::iter<attr_map_t> attr_i(left_n->attrs,
+                                                right_n->attrs);
+              while(attr_i.next())
                 {
-                  file_content_conflict conflict(new_n->self);
-                  if (merge_scalar(downcast_to_file_t(left_n)->content,
-                                   left_marking->file_content,
-                                   left_uncommon_ancestors,
-                                   downcast_to_file_t(right_n)->content,
-                                   right_marking->file_content,
-                                   right_uncommon_ancestors,
-                                   downcast_to_file_t(new_n)->content,
-                                   conflict))
-                    {
-                      // successful merge
-                    }
-                  else
-                    {
-                      downcast_to_file_t(new_n)->content = file_id();
-                      result.file_content_conflicts.push_back(conflict);
-                    }
-                }
-              // merge attributes
-              {
-                attr_map_t::const_iterator left_ai = left_n->attrs.begin();
-                attr_map_t::const_iterator right_ai = right_n->attrs.begin();
-                parallel::iter<attr_map_t> attr_i(left_n->attrs,
-                                                  right_n->attrs);
-                while(attr_i.next())
-                {
                   switch (attr_i.state())
                     {
                     case parallel::invalid:
@@ -685,7 +685,7 @@ roster_merge(roster_t const & left_paren
                           // successful merge
                           safe_insert(new_n->attrs,
                                       make_pair(attr_i.left_key(),
-                                                     new_value));
+                                                new_value));
                         }
                       else
                         {
@@ -698,13 +698,13 @@ roster_merge(roster_t const & left_paren
                     }
 
                 }
-              }
             }
-            ++left_mi;
-            ++right_mi;
-            ++new_i;
-            break;
           }
+          ++left_mi;
+          ++right_mi;
+          ++new_i;
+          break;
+          }
       }
     I(left_mi == left_markings.end());
     I(right_mi == right_markings.end());
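
[Illustrative aside, not part of the diff above.] The roster_merge loop re-indented here drives everything off a three-way classification from parallel::iter: each key of two maps sorted on the same key is either only in the left, only in the right, or in both. A minimal sketch of that parallel walk over two std::maps (the names walk_parallel and presence are made up for the example; the real parallel::iter is an iterator object with an additional invalid state) might look like this:

// Illustrative sketch only; not part of the monotone sources.
#include <iostream>
#include <map>
#include <string>

enum class presence { in_left, in_right, in_both };

template <typename K, typename V, typename Fn>
void
walk_parallel(std::map<K, V> const & left,
              std::map<K, V> const & right,
              Fn report)
{
  auto l = left.begin();
  auto r = right.begin();
  while (l != left.end() || r != right.end())
    {
      if (r == right.end() || (l != left.end() && l->first < r->first))
        {
          report(l->first, presence::in_left);
          ++l;
        }
      else if (l == left.end() || r->first < l->first)
        {
          report(r->first, presence::in_right);
          ++r;
        }
      else
        {
          report(l->first, presence::in_both);
          ++l;
          ++r;
        }
    }
}

int main()
{
  std::map<int, std::string> left  = {{1, "a"}, {2, "b"}};
  std::map<int, std::string> right = {{2, "B"}, {3, "c"}};
  walk_parallel(left, right, [](int key, presence p)
    {
      char const * tag = (p == presence::in_left) ? "in_left"
                         : (p == presence::in_right) ? "in_right" : "in_both";
      std::cout << key << ": " << tag << '\n';   // 1: in_left, 2: in_both, 3: in_right
    });
  return 0;
}
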
============================================================
--- src/merge_roster.hh	cd2da3b06f595187a27622a98580b0636aabea4b
+++ src/merge_roster.hh	b097fbd5bdf41f60737ffff0288034696f7d50c7
@@ -113,8 +113,10 @@ struct duplicate_name_conflict
   resolve_conflicts::file_resolution_t left_resolution, right_resolution;
 
   duplicate_name_conflict ()
-  {left_resolution.first = resolve_conflicts::none;
-    right_resolution.first = resolve_conflicts::none;};
+  {
+    left_resolution.first = resolve_conflicts::none;
+    right_resolution.first = resolve_conflicts::none;
+  };
 };
 
 // nodes with attribute conflicts are left attached in the resulting tree (unless
@@ -141,7 +143,7 @@ struct file_content_conflict
 
   file_content_conflict () :
     nid(the_null_node)
-    {resolution.first = resolve_conflicts::none;};
+  {resolution.first = resolve_conflicts::none;};
 
   file_content_conflict(node_id nid) :
     nid(nid) {resolution.first = resolve_conflicts::none;};
============================================================
--- src/merge_content.cc	76d5a0997d9217b309d75806f8059d8f56f2ca49
+++ src/merge_content.cc	f17fa659968e200e935c7fc580b2d50291cff91a
@@ -39,10 +39,10 @@ content_merge_database_adaptor::content_
 ///////////////////////////////////////////////////////////////////////////
 
 content_merge_database_adaptor::content_merge_database_adaptor(database & db,
-                                                               revision_id const & left,
-                                                               revision_id const & right,
-                                                               marking_map const & left_mm,
-                                                               marking_map const & right_mm)
+    revision_id const & left,
+    revision_id const & right,
+    marking_map const & left_mm,
+    marking_map const & right_mm)
   : db(db), left_rid (left), right_rid (right), left_mm(left_mm), right_mm(right_mm)
 {
   // FIXME: possibly refactor to run this lazily, as we don't
@@ -255,7 +255,7 @@ content_merge_workspace_adaptor::get_ver
 content_merge_workspace_adaptor::get_version(file_id const & ident,
                                              file_data & dat) const
 {
-  map<file_id,file_data>::const_iterator i = temporary_store.find(ident);
+  map<file_id, file_data>::const_iterator i = temporary_store.find(ident);
   if (i != temporary_store.end())
     dat = i->second;
   else if (db.file_version_exists(ident))
============================================================
--- src/merge_content.hh	c74c14bb8031b9bf67f684d9ddfdf6624f424f01
+++ src/merge_content.hh	4860686e0c6805239f17ad0a69e52aaf794aca40
@@ -21,7 +21,7 @@ struct
 struct options;
 
 struct
-content_merge_adaptor
+  content_merge_adaptor
 {
   virtual void record_merge(file_id const & left_ident,
                             file_id const & right_ident,
@@ -47,7 +47,7 @@ struct
 };
 
 struct
-content_merge_database_adaptor
+  content_merge_database_adaptor
   : public content_merge_adaptor
 {
   database & db;
@@ -86,7 +86,7 @@ struct
 };
 
 struct
-content_merge_workspace_adaptor
+  content_merge_workspace_adaptor
   : public content_merge_adaptor
 {
   std::map<file_id, file_data> temporary_store;
@@ -131,7 +131,7 @@ struct
 };
 
 struct
-content_merge_checkout_adaptor
+  content_merge_checkout_adaptor
   : public content_merge_adaptor
 {
   database & db;
@@ -162,7 +162,7 @@ struct
 
 
 struct
-content_merge_empty_adaptor
+  content_merge_empty_adaptor
   : public content_merge_adaptor
 {
   void record_merge(file_id const & left_ident,
============================================================
--- src/enumerator.cc	9e64e9a81b061b38abfcdf0b70e2047e1eb9436f
+++ src/enumerator.cc	0fb69cf10949735f6409400127c25b819d0c9e8f
@@ -45,7 +45,7 @@ revision_enumerator::get_revision_parent
 {
   parents.clear();
   typedef multimap<revision_id, revision_id>::const_iterator ci;
-  pair<ci,ci> range = inverse_graph.equal_range(child);
+  pair<ci, ci> range = inverse_graph.equal_range(child);
   for (ci i = range.first; i != range.second; ++i)
     {
       if (i->first == child)
@@ -59,7 +59,7 @@ revision_enumerator::all_parents_enumera
 revision_enumerator::all_parents_enumerated(revision_id const & child)
 {
   typedef multimap<revision_id, revision_id>::const_iterator ci;
-  pair<ci,ci> range = inverse_graph.equal_range(child);
+  pair<ci, ci> range = inverse_graph.equal_range(child);
   for (ci i = range.first; i != range.second; ++i)
     {
       if (i->first == child)
@@ -80,7 +80,7 @@ revision_enumerator::files_for_revision(
 void
 revision_enumerator::files_for_revision(revision_id const & r,
                                         set<file_id> & full_files,
-                                        set<pair<file_id,file_id> > & del_files)
+                                        set<pair<file_id, file_id> > & del_files)
 {
   // when we're sending a merge, we have to be careful if we
   // want to send as little data as possible. see bug #15846
@@ -116,7 +116,7 @@ revision_enumerator::files_for_revision(
 
       // Queue up all the file-deltas
       for (map<file_path, pair<file_id, file_id> >::const_iterator fd
-             = cs.deltas_applied.begin();
+           = cs.deltas_applied.begin();
            fd != cs.deltas_applied.end(); ++fd)
         {
           file_deltas[fd->second.second] = fd->second.first;
@@ -175,7 +175,7 @@ revision_enumerator::get_revision_certs(
   hashes.clear();
   bool found_one = false;
   typedef multimap<revision_id, id>::const_iterator ci;
-  pair<ci,ci> range = revision_certs.equal_range(rid);
+  pair<ci, ci> range = revision_certs.equal_range(rid);
   for (ci i = range.first; i != range.second; ++i)
     {
       found_one = true;
@@ -210,7 +210,7 @@ revision_enumerator::step()
           if (terminal_nodes.find(r) == terminal_nodes.end())
             {
               typedef multimap<revision_id, revision_id>::const_iterator ci;
-              pair<ci,ci> range = graph.equal_range(r);
+              pair<ci, ci> range = graph.equal_range(r);
               for (ci i = range.first; i != range.second; ++i)
                 {
                   // We push_front here rather than push_back in order
============================================================
--- src/enumerator.hh	5e52b8fae270dcabc4ada249327adcd496521a73
+++ src/enumerator.hh	ab18cb92e9046d2bd258f8f5ee07f353779f9dca
@@ -24,7 +24,7 @@ struct
 // used for sending sections of the revision graph through netsync.
 
 struct
-enumerator_callbacks
+  enumerator_callbacks
 {
   // Your callback will be asked whether you want the details of each rev
   // or cert, in order; you should return true for any rev or cert you want
@@ -42,7 +42,7 @@ struct
 };
 
 struct
-enumerator_item
+  enumerator_item
 {
   enum { fdata, fdelta, rev, cert } tag;
   id ident_a;
@@ -50,7 +50,7 @@ class
 };
 
 class
-revision_enumerator
+  revision_enumerator
 {
   project_t & project;
   enumerator_callbacks & cb;
@@ -65,7 +65,7 @@ revision_enumerator
   bool all_parents_enumerated(revision_id const & child);
   void files_for_revision(revision_id const & r,
                           std::set<file_id> & full_files,
-                          std::set<std::pair<file_id,file_id> > & del_files);
+                          std::set<std::pair<file_id, file_id> > & del_files);
   void get_revision_certs(revision_id const & rid,
                           std::vector<id> & certs);
 
============================================================
--- src/refiner.cc	2de8131df2595d5bcff5efe133ca1f768006d38f
+++ src/refiner.cc	a589006f39a5cc197ccbf4ccd3bda36c06e9e99d
@@ -148,7 +148,7 @@ refiner::note_subtree_shared_with_peer(m
 {
   prefix pref;
   our_node.extended_raw_prefix(slot, pref);
-  collect_items_in_subtree(table, pref, our_node.level+1, peer_items);
+  collect_items_in_subtree(table, pref, our_node.level + 1, peer_items);
 }
 
 refiner::refiner(netcmd_item_type type, protocol_voice voice, refiner_callbacks & cb)
============================================================
--- src/refiner.hh	543f5789aa9f74593f909e4b80cea986639228b4
+++ src/refiner.hh	0e206de6031f9af075210d259bbe5b64cca11efa
@@ -36,7 +36,7 @@ struct
 //    items you've determined the existence of during refinement.
 
 struct
-refiner_callbacks
+  refiner_callbacks
 {
   virtual void queue_refine_cmd(refinement_type ty,
                                 merkle_node const & our_node) = 0;
@@ -46,7 +46,7 @@ class
 };
 
 class
-refiner
+  refiner
 {
   netcmd_item_type type;
   protocol_voice voice;
============================================================
--- src/legacy.cc	c936ff983cd8724de334694888504bd82e0c5902
+++ src/legacy.cc	be33ebf3219e52634608496f20ebdda1cfa9df85
@@ -105,7 +105,7 @@ namespace legacy
             parser.esym(syms::to);
             parser.str(to_str);
             renames[old_rev][file_path_internal(to_str)]
-              = file_path_internal(from_str);
+            = file_path_internal(from_str);
           }
         else if (parser.symp(syms::patch))
           {
============================================================
--- src/legacy.hh	6758cc428dda77af648e712d572e0b6c764146b4
+++ src/legacy.hh	3289ce367079a8a3ffb96a9673d8f531c693d97d
@@ -48,8 +48,8 @@ namespace legacy
 
   ///////
   // parsing old-style manifests, for 'rosterify' and 'changesetify' commands
-  typedef std::map<file_path, file_id,
-                   std::less<file_path> > manifest_map;
+  typedef std::map < file_path, file_id,
+          std::less<file_path> > manifest_map;
   void read_manifest_map(manifest_data const & mdat,
                          manifest_map & man);
 
============================================================
--- src/cmd.hh	968a5cbc4b9dc33badca1e93a6f387eaa1c70c9e
+++ src/cmd.hh	2860f8c48f7e7ba850201c496a1bd33c3dbe7975
@@ -71,7 +71,7 @@ namespace commands
 
     utf8 const & primary_name(void) const;
     names_set const & names(void) const;
-    void add_alias(const utf8 &new_name);
+    void add_alias(const utf8 & new_name);
     command * parent(void) const;
     bool is_group(void) const;
     bool hidden(void) const;
@@ -98,9 +98,9 @@ namespace commands
     command const * find_command(command_id const & id) const;
     command * find_command(command_id const & id);
     std::set< command_id >
-      complete_command(command_id const & id,
-                       command_id completed = command_id(),
-                       bool completion_ok = true) const;
+    complete_command(command_id const & id,
+                     command_id completed = command_id(),
+                     bool completion_ok = true) const;
   };
 
   class cmdpreset
@@ -197,8 +197,8 @@ namespace commands { \
     cmdpreset_ ## C C ## _cmdpreset;                            \
   }                                                             \
   void commands::cmdpreset_ ## C ::preset(options & opts) const
-  
 
+
 #define _CMD2(C, name, aliases, parent, hidden, params, abstract, desc, opts) \
 namespace commands {                                                 \
   class cmd_ ## C : public command                                   \
============================================================
--- src/cmd_automate.cc	9a39706690d6edbe1233e0cb613bc3ab8d0b3fe3
+++ src/cmd_automate.cc	dff0158a02050a257e2c934debaa6c2478827eb8
@@ -41,7 +41,8 @@ CMD_GROUP(automate, "automate", "au", CM
           N_("Interface for scripted execution"),
           "");
 
-namespace commands {
+namespace commands
+{
   automate::automate(string const & name,
                      bool stdio_ok,
                      bool hidden,
@@ -66,7 +67,7 @@ namespace commands {
                  std::ostream & output) const
   {
     make_io_binary();
-    setlocale(LC_ALL,"POSIX");
+    setlocale(LC_ALL, "POSIX");
     exec_from_automate(app, execid, args, output);
   }
 
@@ -116,9 +117,9 @@ CMD_AUTOMATE(interface_version, "",
 }
 
 // these headers are outputted before any other output for stdio and remote_stdio
-void commands::get_stdio_headers(std::vector<std::pair<std::string,std::string> > & headers)
+void commands::get_stdio_headers(std::vector<std::pair<std::string, std::string> > & headers)
 {
-    headers.push_back(make_pair("format-version", stdio_format_version));
+  headers.push_back(make_pair("format-version", stdio_format_version));
 }
 
 // Name: bandtest
@@ -129,10 +130,10 @@ CMD_AUTOMATE_HIDDEN(bandtest, "{ info | 
 // Output format: None
 // Error conditions: None.
 CMD_AUTOMATE_HIDDEN(bandtest, "{ info | warning | error | ticker }",
-             N_("Emulates certain kinds of diagnostic / UI messages "
-                "for debugging and testing purposes, such as stdio"),
-             "",
-             options::opts::none)
+                    N_("Emulates certain kinds of diagnostic / UI messages "
+                       "for debugging and testing purposes, such as stdio"),
+                    "",
+                    options::opts::none)
 {
   E(args.size() == 1, origin::user,
     F("wrong argument count"));
@@ -153,9 +154,9 @@ CMD_AUTOMATE_HIDDEN(bandtest, "{ info | 
       int max = 20;
       second.set_total(max);
 
-      for (int i=0; i<max; i++)
+      for (int i = 0; i < max; i++)
         {
-          first+=3;
+          first += 3;
           ++second;
           usleep(100000); // 100ms
         }
@@ -165,9 +166,9 @@ CMD_AUTOMATE_HIDDEN(bandtest, "{ info | 
 }
 
 
-static void out_of_band_to_automate_streambuf(char channel, std::string const& text, void *opaque)
+static void out_of_band_to_automate_streambuf(char channel, std::string const & text, void * opaque)
 {
-  reinterpret_cast<automate_ostream*>(opaque)->write_out_of_band(channel, text);
+  reinterpret_cast<automate_ostream *>(opaque)->write_out_of_band(channel, text);
 }
 
 // Name: stdio
@@ -212,16 +213,18 @@ class done_reading_input {};
 class done_reading_input {};
 // lambda expressions would be really nice right about now
 // even the ability to use local classes as template arguments would help
-class local_stdio_pre_fn {
+class local_stdio_pre_fn
+{
   automate_reader & ar;
   vector<string> & cmdline;
-  vector<pair<string,string> > & params;
+  vector<pair<string, string> > & params;
 public:
   local_stdio_pre_fn(automate_reader & a, vector<string> & c,
-                     vector<pair<string,string> > & p)
+                     vector<pair<string, string> > & p)
     : ar(a), cmdline(c), params(p)
   { }
-  void operator()() {
+  void operator()()
+  {
     if (!ar.get_command(params, cmdline))
       throw done_reading_input();
   }
@@ -259,9 +262,9 @@ CMD_AUTOMATE_NO_STDIO(stdio, "",
       try
         {
           pair<int, string> err = automate_stdio_helpers::
-            automate_stdio_shared_body(app, cmdline, params, os,
-                                       local_stdio_pre_fn(ar, cmdline, params),
-                                       boost::function<void(command_id const &)>());
+                                  automate_stdio_shared_body(app, cmdline, params, os,
+                                                             local_stdio_pre_fn(ar, cmdline, params),
+                                                             boost::function<void(command_id const &)>());
           if (err.first != 0)
             os.write_out_of_band('e', err.second);
           os.end_cmd(err.first);
@@ -279,7 +282,7 @@ LUAEXT(change_workspace, )
 LUAEXT(change_workspace, )
 {
   const system_path ws(luaL_checkstring(LS, -1), origin::user);
-  app_state* app_p = get_app_state(LS);
+  app_state * app_p = get_app_state(LS);
 
   try
     {
@@ -317,12 +320,12 @@ LUAEXT(mtn_automate, )
 
   try
     {
-      app_state* app_p = get_app_state(LS);
+      app_state * app_p = get_app_state(LS);
       I(app_p != NULL);
       I(app_p->lua.check_lua_state(LS));
       E(app_p->mtn_automate_allowed, origin::user,
-          F("it is illegal to call the mtn_automate() lua extension,\n"
-            "unless from a command function defined by register_command()."));
+        F("it is illegal to call the mtn_automate() lua extension,\n"
+          "unless from a command function defined by register_command()."));
 
       // don't allow recursive calls
       app_p->mtn_automate_allowed = false;
@@ -335,7 +338,7 @@ LUAEXT(mtn_automate, )
       L(FL("Starting call to mtn_automate lua hook"));
 
       vector<string> args;
-      for (int i=1; i<=n; i++)
+      for (int i = 1; i <= n; i++)
         {
           string next_arg(luaL_checkstring(LS, i));
           L(FL("arg: %s") % next_arg);
@@ -346,13 +349,13 @@ LUAEXT(mtn_automate, )
       commands::automate const * cmd;
 
       automate_stdio_helpers::
-        automate_stdio_shared_setup(*app_p, args, 0,
-                                    id, cmd,
-                                    automate_stdio_helpers::no_force_stdio_ticker);
+      automate_stdio_shared_setup(*app_p, args, 0,
+                                  id, cmd,
+                                  automate_stdio_helpers::no_force_stdio_ticker);
 
 
       commands::automate const * acmd
-        = dynamic_cast< commands::automate const * >(cmd);
+      = dynamic_cast< commands::automate const * >(cmd);
       I(acmd);
 
 
============================================================
--- src/cmd_db.cc	6bc1764d38063b0f5a23dd490f02786daafad0b7
+++ src/cmd_db.cc	bfab83a5c295268ab4836ec9a2b57cae28a59307
@@ -225,7 +225,7 @@ CMD(db_kill_rev_locally, "kill_revision"
               "the workspace contains uncommitted changes.\n"
               "Consider updating your workspace to another revision first,\n"
               "before you try to kill this revision again.")
-              % revid);
+            % revid);
 
           P(F("applying changes from %s on the current workspace")
             % revid);
@@ -256,8 +256,8 @@ CMD(db_kill_certs_locally, "kill_certs",
   if (args.size() < 2 || args.size() > 3)
     throw usage(execid);
 
-  string selector = idx(args,0)();
-  cert_name name = typecast_vocab<cert_name>(idx(args,1));
+  string selector = idx(args, 0)();
+  cert_name name = typecast_vocab<cert_name>(idx(args, 1));
 
   database db(app);
   project_t project(db);
@@ -291,7 +291,7 @@ CMD(db_kill_certs_locally, "kill_certs",
     }
   else
     {
-      cert_value value = typecast_vocab<cert_value>(idx(args,2));
+      cert_value value = typecast_vocab<cert_value>(idx(args, 2));
       L(FL("deleting all certs with name '%s' and value '%s' on %d revisions")
         % name % value % revisions.size());
       for (set<revision_id>::const_iterator r = revisions.begin();
@@ -384,9 +384,9 @@ CMD_HIDDEN(clear_epoch, "clear_epoch", "
 }
 
 CMD_HIDDEN(clear_epoch, "clear_epoch", "", CMD_REF(db), "BRANCH",
-    N_("Clears the branch's epoch"),
-    "",
-    options::opts::none)
+           N_("Clears the branch's epoch"),
+           "",
+           options::opts::none)
 {
   if (args.size() != 1)
     throw usage(execid);
@@ -530,7 +530,7 @@ CMD(cleanup_workspace_list, "cleanup_wor
         {
           L(FL("ignoring workspace '%s', expected database %s, "
                "but has %s configured in _MTN/options")
-              % workspace_path % db.get_filename() % workspace_db_path);
+            % workspace_path % db.get_filename() % workspace_db_path);
           continue;
         }
 
@@ -604,7 +604,7 @@ CMD_HIDDEN(test_migration_step, "test_mi
 
   if (args.size() != 1)
     throw usage(execid);
-  db.test_migration_step(keys, idx(args,0)());
+  db.test_migration_step(keys, idx(args, 0)());
 }
 
 CMD_HIDDEN(rev_height, "rev_height", "", CMD_REF(informative), N_("REV"),
@@ -627,10 +627,10 @@ CMD_HIDDEN(load_revisions, "load_revisio
 // loading revisions is relatively fast
 
 CMD_HIDDEN(load_revisions, "load_revisions", "", CMD_REF(db), "",
-    N_("Load all revisions from the database"),
-    N_("This command loads all revisions from the database and is "
-       "intended to be used for timing revision loading performance."),
-    options::opts::none)
+           N_("Load all revisions from the database"),
+           N_("This command loads all revisions from the database and is "
+              "intended to be used for timing revision loading performance."),
+           options::opts::none)
 {
   database db(app);
   set<revision_id> ids;
@@ -656,10 +656,10 @@ CMD_HIDDEN(load_rosters, "load_rosters",
 // loading rosters is slow compared with files, revisions or certs
 
 CMD_HIDDEN(load_rosters, "load_rosters", "", CMD_REF(db), "",
-    N_("Load all roster versions from the database"),
-    N_("This command loads all roster versions from the database and is "
-       "intended to be used for timing roster reconstruction performance."),
-    options::opts::none)
+           N_("Load all roster versions from the database"),
+           N_("This command loads all roster versions from the database and is "
+              "intended to be used for timing roster reconstruction performance."),
+           options::opts::none)
 {
   database db(app);
   set<revision_id> ids;
@@ -684,10 +684,10 @@ CMD_HIDDEN(load_files, "load_files", "",
 // loading files is slower than revisions but faster than rosters
 
 CMD_HIDDEN(load_files, "load_files", "", CMD_REF(db), "",
-    N_("Load all file versions from the database"),
-    N_("This command loads all files versions from the database and is "
-       "intended to be used for timing file reconstruction performance."),
-    options::opts::none)
+           N_("Load all file versions from the database"),
+           N_("This command loads all files versions from the database and is "
+              "intended to be used for timing file reconstruction performance."),
+           options::opts::none)
 {
   database db(app);
   set<file_id> files;
@@ -710,10 +710,10 @@ CMD_HIDDEN(load_certs, "load_certs", "",
 // loading certs is fast
 
 CMD_HIDDEN(load_certs, "load_certs", "", CMD_REF(db), "",
-    N_("Load all certs from the database"),
-    N_("This command loads all certs from the database and is "
-       "intended to be used for timing cert loading performance."),
-    options::opts::none)
+           N_("Load all certs from the database"),
+           N_("This command loads all certs from the database and is "
+              "intended to be used for timing cert loading performance."),
+           options::opts::none)
 {
   database db(app);
   vector<cert> certs;
============================================================
--- src/cmd_diff_log.cc	b24dffb0470d057fa1e91ccc5a81627b40252e78
+++ src/cmd_diff_log.cc	0267ac98cd2578c6c3850a324b46661fb44468a3
@@ -161,7 +161,7 @@ dump_diffs(lua_hooks & lua,
               // right_id is null
 
               path_node_data.insert(make_pair(dat.left_path, dat));
-          }
+            }
           break;
 
         case parallel::in_right:
@@ -200,7 +200,7 @@ dump_diffs(lua_hooks & lua,
     }
 
   for (std::multimap<file_path, diff_node_data>::iterator i = path_node_data.begin();
-         i != path_node_data.end(); ++i)
+       i != path_node_data.end(); ++i)
     {
       diff_node_data & dat = (*i).second;
       data left_data, right_data;
@@ -537,19 +537,19 @@ struct rev_cmp
   }
 };
 
-typedef priority_queue<pair<rev_height, revision_id>,
-                       vector<pair<rev_height, revision_id> >,
-                       rev_cmp> frontier_t;
+typedef priority_queue < pair<rev_height, revision_id>,
+        vector<pair<rev_height, revision_id> >,
+        rev_cmp > frontier_t;
 
 void
-log_print_rev (app_state &      app,
-               database &       db,
-               project_t &      project,
+log_print_rev (app_state    &   app,
+               database    &    db,
+               project_t    &   project,
                revision_id      rid,
-               revision_t &     rev,
+               revision_t   &   rev,
                string           date_fmt,
                node_restriction mask,
-               ostream &        out)
+               ostream     &    out)
 {
   cert_name const author_name(author_cert_name);
   cert_name const date_name(date_cert_name);
@@ -665,16 +665,19 @@ log_common (app_state & app,
            i != rev.edges.end(); i++)
         {
           revision_id rid = edge_old_revision(i);
-          if ((FL("%s") % rid).str().empty()) {
-            W(F("workspace has no parent revision, probably an empty branch"));
-          } else {
-            E(db.revision_exists(rid), origin::user,
-              F("workspace parent revision %s not found - "
-                "did you specify a wrong database?") % rid);
-            starting_revs.insert(rid);
-            if (i == rev.edges.begin())
-              first_rid = rid;
-          }
+          if ((FL("%s") % rid).str().empty())
+            {
+              W(F("workspace has no parent revision, probably an empty branch"));
+            }
+          else
+            {
+              E(db.revision_exists(rid), origin::user,
+                F("workspace parent revision %s not found - "
+                  "did you specify a wrong database?") % rid);
+              starting_revs.insert(rid);
+              if (i == rev.edges.begin())
+                first_rid = rid;
+            }
         }
     }
   else if (!app.opts.from.empty())
@@ -880,7 +883,7 @@ log_common (app_state & app,
                   marked_revs.insert(marks->parent_name.begin(),
                                      marks->parent_name.end());
                   for (map<attr_key, set<revision_id> >::const_iterator
-                         a = marks->attrs.begin(); a != marks->attrs.end(); ++a)
+                       a = marks->attrs.begin(); a != marks->attrs.end(); ++a)
                     marked_revs.insert(a->second.begin(), a->second.end());
                 }
             }
@@ -910,7 +913,7 @@ log_common (app_state & app,
       if (app.opts.no_merges && rev.is_merge_node())
         print_this = false;
       else if (!app.opts.revision.empty() &&
-          selected_revs.find(rid) == selected_revs.end())
+               selected_revs.find(rid) == selected_revs.end())
         print_this = false;
 
       set<revision_id> interesting;
@@ -994,10 +997,10 @@ CMD_AUTOMATE(log, N_("[PATH] ..."),
 CMD_AUTOMATE(log, N_("[PATH] ..."),
              N_("Lists the selected revision history"),
              "",
-    options::opts::last | options::opts::next |
-    options::opts::from | options::opts::to |
-    options::opts::depth | options::opts::exclude |
-    options::opts::no_merges)
+             options::opts::last | options::opts::next |
+             options::opts::from | options::opts::to |
+             options::opts::depth | options::opts::exclude |
+             options::opts::no_merges)
 {
   log_common (app, args, true, output);
 }
============================================================
--- src/cmd_files.cc	f15378efd25e597762776ad6e9c6f78c1a4ff191
+++ src/cmd_files.cc	70fbf79b0e8c66d1d20c16275423fca4cfbd3a07
@@ -64,9 +64,9 @@ CMD(fmerge, "fmerge", "", CMD_REF(debug)
     throw usage(execid);
 
   file_id
-    anc_id(decode_hexenc_as<file_id>(idx(args, 0)(), origin::user)),
-    left_id(decode_hexenc_as<file_id>(idx(args, 1)(), origin::user)),
-    right_id(decode_hexenc_as<file_id>(idx(args, 2)(), origin::user));
+  anc_id(decode_hexenc_as<file_id>(idx(args, 0)(), origin::user)),
+         left_id(decode_hexenc_as<file_id>(idx(args, 1)(), origin::user)),
+         right_id(decode_hexenc_as<file_id>(idx(args, 2)(), origin::user));
 
   file_data anc, left, right;
 
@@ -104,12 +104,12 @@ CMD(fdiff, "fdiff", "", CMD_REF(debug), 
     throw usage(execid);
 
   string const
-    & src_name = idx(args, 0)(),
+  & src_name = idx(args, 0)(),
     & dst_name = idx(args, 1)();
 
   file_id
-    src_id(decode_hexenc_as<file_id>(idx(args, 2)(), origin::user)),
-    dst_id(decode_hexenc_as<file_id>(idx(args, 3)(), origin::user));
+  src_id(decode_hexenc_as<file_id>(idx(args, 2)(), origin::user)),
+         dst_id(decode_hexenc_as<file_id>(idx(args, 3)(), origin::user));
 
   file_data src, dst;
 
@@ -188,11 +188,11 @@ CMD(annotate, "annotate", "", CMD_REF(in
   // find the version of the file requested
   E(roster.has_node(file), origin::user,
     F("no such file '%s' in revision %s")
-      % file % rid);
+    % file % rid);
   const_node_t node = roster.get_node(file);
   E(is_file_t(node), origin::user,
     F("'%s' in revision %s is not a file")
-      % file % rid);
+    % file % rid);
 
   const_file_t file_node = downcast_to_file_t(node);
   L(FL("annotate for file_id %s") % file_node->self);
============================================================
--- src/cmd_key_cert.cc	a48989eb8c9721a45469b0b1f44b36b1771a2eeb
+++ src/cmd_key_cert.cc	2e654c11156881fc618833ee7c394917dbf014e2
@@ -178,9 +178,9 @@ CMD_AUTOMATE(drop_public_key, N_("KEY_NA
 }
 
 CMD_AUTOMATE(drop_public_key, N_("KEY_NAME_OR_HASH"),
-    N_("Drops a public key"),
-    "",
-    options::opts::none)
+             N_("Drops a public key"),
+             "",
+             options::opts::none)
 {
   E(args.size() == 1, origin::user,
     F("wrong argument count"));
@@ -340,16 +340,16 @@ CMD(trusted, "trusted", "", CMD_REF(key_
        ostream_iterator<key_identity_info>(all_signers, " "));
 
   cout << (F("if a cert on: %s\n"
-            "with key: %s\n"
-            "and value: %s\n"
-            "was signed by: %s\n"
-            "it would be: %s")
-    % ident
-    % cname
-    % value
-    % all_signers.str()
-    % (trusted ? _("trusted") : _("UNtrusted")))
-    << '\n'; // final newline is kept out of the translation
+             "with key: %s\n"
+             "and value: %s\n"
+             "was signed by: %s\n"
+             "it would be: %s")
+           % ident
+           % cname
+           % value
+           % all_signers.str()
+           % (trusted ? _("trusted") : _("UNtrusted")))
+       << '\n'; // final newline is kept out of the translation
 }
 
 CMD(tag, "tag", "", CMD_REF(review), N_("REVISION TAGNAME"),
============================================================
--- src/cmd_list.cc	5eee1fff3753df893cfc1325d362f6bc8445dd11
+++ src/cmd_list.cc	a7e0dc06b350912b9af7fbb8d7bc7255646b0503
@@ -55,7 +55,8 @@ CMD_GROUP(list, "list", "ls", CMD_REF(in
              "or known, unknown, intentionally ignored, missing, or "
              "changed-state files."));
 
-namespace {
+namespace
+{
   // for 'ls certs' and 'ls tags'
   string format_key(key_identity_info const & info)
   {
@@ -309,7 +310,8 @@ typedef map<key_id, key_location_info> k
 };
 typedef map<key_id, key_location_info> key_map;
 
-namespace {
+namespace
+{
   void get_key_list(database & db,
                     key_store & keys,
                     lua_hooks & lua,
@@ -480,7 +482,7 @@ CMD(branches, "branches", "", CMD_REF(li
 {
   globish inc("*", origin::internal);
   if (args.size() == 1)
-    inc = globish(idx(args,0)(), origin::user);
+    inc = globish(idx(args, 0)(), origin::user);
   else if (args.size() > 1)
     throw usage(execid);
 
@@ -508,7 +510,7 @@ CMD(epochs, "epochs", "", CMD_REF(list),
   if (args.empty())
     {
       for (map<branch_name, epoch_data>::const_iterator
-             i = epochs.begin();
+           i = epochs.begin();
            i != epochs.end(); ++i)
         {
           cout << encode_hexenc(i->second.inner()(),
@@ -539,7 +541,7 @@ CMD(tags, "tags", "", CMD_REF(list), "[P
 {
   globish inc("*", origin::internal);
   if (args.size() == 1)
-    inc = globish(idx(args,0)(), origin::user);
+    inc = globish(idx(args, 0)(), origin::user);
   else if (args.size() > 1)
     throw usage(execid);
 
@@ -566,7 +568,7 @@ CMD(tags, "tags", "", CMD_REF(list), "[P
           hexenc<id> hexid;
           encode_hexenc(i->ident.inner(), hexid);
 
-          cout << i->name << ' ' << hexid().substr(0,10) << "... ";
+          cout << i->name << ' ' << hexid().substr(0, 10) << "... ";
 
           for (vector<cert>::const_iterator c = certs.begin();
                c != certs.end(); ++c)
@@ -646,7 +648,7 @@ print_workspace_info(database & db, lua_
         {
           L(FL("ignoring workspace '%s', expected database %s, "
                "but has %s configured in _MTN/options")
-              % workspace_path % db_path % workspace_db_path);
+            % workspace_path % db_path % workspace_db_path);
           continue;
         }
 
@@ -659,8 +661,8 @@ print_workspace_info(database & db, lua_
       out << indent << F("%s (in '%s')") % workspace_branch % workspace_path << '\n';
     }
 
-    if (!has_valid_workspaces)
-      out << indent << F("no known valid workspaces") << '\n';
+  if (!has_valid_workspaces)
+    out << indent << F("no known valid workspaces") << '\n';
 }
 
 CMD(workspaces, "workspaces", "", CMD_REF(list), "",
@@ -710,8 +712,8 @@ CMD(databases, "databases", "dbs", CMD_R
             }
 
           string db_alias = ":" + db_path.as_internal().substr(
-            search_path.as_internal().size() + 1
-          );
+                              search_path.as_internal().size() + 1
+                            );
 
           options db_opts;
           db_opts.dbname_type = managed_db;
@@ -729,7 +731,7 @@ CMD(databases, "databases", "dbs", CMD_R
               string failure = f.what();
               for (size_t pos = failure.find(prefix);
                    pos != string::npos; pos = failure.find(prefix))
-                 failure.replace(pos, prefix.size(), "");
+                failure.replace(pos, prefix.size(), "");
 
               W(F("%s") % failure);
               W(F("ignoring database '%s'") % db_path);
@@ -880,7 +882,7 @@ CMD(changed, "changed", "", CMD_REF(list
     {
       set<node_id> nodes;
       roster_t const & old_roster
-        = *safe_get(parents, edge_old_revision(i)).first;
+      = *safe_get(parents, edge_old_revision(i)).first;
       select_nodes_modified_by_cset(edge_changes(i),
                                     old_roster, new_roster, nodes);
 
============================================================
--- src/cmd_merging.cc	84b177469ecb60664be23c42c38cc649d23899a7
+++ src/cmd_merging.cc	f5fdbc18e48e9345a8a00dd544cd7249e0089cfc
@@ -293,7 +293,7 @@ update(app_state & app,
 
   // Get the OLD and WORKING rosters
   roster_t_cp old_roster
-    = parent_cached_roster(parents.begin()).first;
+  = parent_cached_roster(parents.begin()).first;
   MM(*old_roster);
 
   shared_ptr<roster_t> working_roster = shared_ptr<roster_t>(new roster_t());
@@ -540,7 +540,7 @@ CMD(merge, "merge", "", CMD_REF(tree), "
     }
 
   P(FP("%d head on branch '%s'", "%d heads on branch '%s'", heads.size())
-      % heads.size() % app.opts.branch);
+    % heads.size() % app.opts.branch);
 
   // avoid failure after lots of work
   cache_user_key(app.opts, project, keys, app.lua);
@@ -648,14 +648,14 @@ void perform_merge_into_dir(app_state & 
   if (*src_i == *dst_i || is_ancestor(db, *src_i, *dst_i))
     {
       P(F("branch '%s' is up-to-date with respect to branch '%s'")
-          % idx(args, 1)() % idx(args, 0)());
+        % idx(args, 1)() % idx(args, 0)());
       P(F("no action taken"));
       return;
     }
 
   cache_user_key(app.opts, project, keys, app.lua);
 
-  P(F("propagating %s -> %s") % idx(args,0) % idx(args,1));
+  P(F("propagating %s -> %s") % idx(args, 0) % idx(args, 1));
   P(F("[left]  %s") % *src_i);
   P(F("[right] %s") % *dst_i);
 
@@ -681,8 +681,8 @@ void perform_merge_into_dir(app_state & 
         MM(right_roster);
         marking_map left_marking_map, right_marking_map;
         set<revision_id>
-          left_uncommon_ancestors,
-          right_uncommon_ancestors;
+        left_uncommon_ancestors,
+        right_uncommon_ancestors;
 
         db.get_roster(left_rid, left_roster, left_marking_map);
         db.get_roster(right_rid, right_roster, right_marking_map);
@@ -690,7 +690,7 @@ void perform_merge_into_dir(app_state & 
                                   left_uncommon_ancestors,
                                   right_uncommon_ancestors);
 
-        if (!idx(args,2)().empty())
+        if (!idx(args, 2)().empty())
           {
             dir_t moved_root = left_roster.root();
             file_path pth = file_path_external(idx(args, 2));
@@ -720,12 +720,12 @@ void perform_merge_into_dir(app_state & 
                      result);
 
         content_merge_database_adaptor
-          dba(db, left_rid, right_rid, left_marking_map, right_marking_map);
+        dba(db, left_rid, right_rid, left_marking_map, right_marking_map);
 
         bool resolutions_given;
 
         parse_resolve_conflicts_opts
-          (app.opts, left_rid, left_roster, right_rid, right_roster, result, resolutions_given);
+        (app.opts, left_rid, left_roster, right_rid, right_roster, result, resolutions_given);
 
         resolve_merge_conflicts(app.lua, app.opts, left_roster, right_roster,
                                 result, dba, resolutions_given);
@@ -744,7 +744,7 @@ void perform_merge_into_dir(app_state & 
       bool log_message_given;
       utf8 log_message;
       utf8 log_prefix = utf8((FL("propagate from branch '%s' (head %s)\n"
-                               "            to branch '%s' (head %s)\n")
+                                 "            to branch '%s' (head %s)\n")
                               % idx(args, 0)
                               % *src_i
                               % idx(args, 1)
@@ -829,7 +829,7 @@ CMD(merge_into_workspace, "merge_into_wo
 
     E(parent_roster(parents.begin()) == *working_roster, origin::user,
       F("'%s' can only be used in a workspace with no pending changes") %
-        join_words(execid)());
+      join_words(execid)());
 
     left_id = parent_id(parents.begin());
     left = parent_cached_roster(parents.begin());
@@ -855,8 +855,8 @@ CMD(merge_into_workspace, "merge_into_wo
 
   set<revision_id> left_uncommon_ancestors, right_uncommon_ancestors;
   db.get_uncommon_ancestors(left_id, right_id,
-                                left_uncommon_ancestors,
-                                right_uncommon_ancestors);
+                            left_uncommon_ancestors,
+                            right_uncommon_ancestors);
 
   roster_merge_result merge_result;
   MM(merge_result);
@@ -1098,8 +1098,8 @@ CMD(show_conflicts, "show_conflicts", ""
   if (args.size() != 2)
     throw usage(execid);
   revision_id l_id, r_id;
-  complete(app.opts, app.lua, project, idx(args,0)(), l_id);
-  complete(app.opts, app.lua, project, idx(args,1)(), r_id);
+  complete(app.opts, app.lua, project, idx(args, 0)(), l_id);
+  complete(app.opts, app.lua, project, idx(args, 1)(), r_id);
 
   show_conflicts_core(db, app.lua, l_id, r_id,
                       false, // basic_io
@@ -1134,8 +1134,8 @@ static void get_conflicts_rids(args_vect
   else if (args.size() == 2)
     {
       // get ids from args
-      complete(app.opts, app.lua, project, idx(args,0)(), left_rid);
-      complete(app.opts, app.lua, project, idx(args,1)(), right_rid);
+      complete(app.opts, app.lua, project, idx(args, 0)(), left_rid);
+      complete(app.opts, app.lua, project, idx(args, 1)(), right_rid);
     }
   else
     E(false, origin::user, F("wrong argument count"));
@@ -1222,12 +1222,12 @@ CMD_AUTOMATE(file_merge, N_("LEFT_REVID 
   project_t project(db);
 
   revision_id left_rid;
-  complete(app.opts, app.lua, project, idx(args,0)(), left_rid);
-  file_path const left_path = file_path_external(idx(args,1));
+  complete(app.opts, app.lua, project, idx(args, 0)(), left_rid);
+  file_path const left_path = file_path_external(idx(args, 1));
 
   revision_id right_rid;
-  complete(app.opts, app.lua, project, idx(args,2)(), right_rid);
-  file_path const right_path = file_path_external(idx(args,3));
+  complete(app.opts, app.lua, project, idx(args, 2)(), right_rid);
+  file_path const right_path = file_path_external(idx(args, 3));
 
   roster_t left_roster;
   roster_t right_roster;
@@ -1525,8 +1525,8 @@ CMD(get_roster, "get_roster", "", CMD_RE
 
           set<revision_id> left_uncommon_ancestors, right_uncommon_ancestors;
           db.get_uncommon_ancestors(left_id, right_id,
-                                        left_uncommon_ancestors,
-                                        right_uncommon_ancestors);
+                                    left_uncommon_ancestors,
+                                    right_uncommon_ancestors);
 
           mark_merge_roster(left_roster, left_markings,
                             left_uncommon_ancestors,
============================================================
--- src/cmd_netsync.cc	88ca982ea191c22c88b490e87695b359754aff45
+++ src/cmd_netsync.cc	e163b9d3dc58d4efec67576c1f10c22ff43a66e6
@@ -57,8 +57,8 @@ extract_client_connection_info(options &
 {
   if (opts.remote_stdio_host_given)
     {
-       netsync_connection_info::setup_from_uri(opts, project.db, lua, type,
-                                               opts.remote_stdio_host, info);
+      netsync_connection_info::setup_from_uri(opts, project.db, lua, type,
+                                              opts.remote_stdio_host, info);
     }
   else
     {
@@ -75,8 +75,8 @@ extract_client_connection_info(options &
           arg_type server = idx(args, 0);
           vector<arg_type> include;
           include.insert(include.begin(),
-                                  args.begin() + 1,
-                                  args.end());
+                         args.begin() + 1,
+                         args.end());
           vector<arg_type> exclude = opts.exclude;
 
           netsync_connection_info::setup_from_server_and_pattern(opts, project.db,
@@ -86,13 +86,13 @@ extract_client_connection_info(options &
                                                                  info);
         }
       else
-       {
-         // if no argument has been given and the --remote_stdio_host
-         // option has been left out, try to load the database defaults
-         // at least
-         netsync_connection_info::setup_default(opts, project.db,
-                                                lua, type, info);
-       }
+        {
+          // if no argument has been given and the --remote_stdio_host
+          // option has been left out, try to load the database defaults
+          // at least
+          netsync_connection_info::setup_default(opts, project.db,
+                                                 lua, type, info);
+        }
     }
 
   opts.no_transport_auth =
@@ -122,7 +122,7 @@ CMD_AUTOMATE_NO_STDIO(remote_stdio,
       W(F("no database given; assuming '%s' database. This means that\n"
           "we can't verify the server key, because we have no record of\n"
           "what it should be.")
-          % memory_db_identifier);
+        % memory_db_identifier);
       app.opts.dbname_type = memory_db;
       app.opts.dbname_given = true;
     }
@@ -158,7 +158,7 @@ parse_options_from_args(args_vector & ar
       string name;
       arg_type arg;
 
-      if (idx(args,i)() == "--" || seen_dashdash)
+      if (idx(args, i)() == "--" || seen_dashdash)
         {
           if (!seen_dashdash)
             {
@@ -167,34 +167,34 @@ parse_options_from_args(args_vector & ar
           ++i;
           continue;
         }
-      else if (idx(args,i)().substr(0,2) == "--")
+      else if (idx(args, i)().substr(0, 2) == "--")
         {
-          string::size_type equals = idx(args,i)().find('=');
+          string::size_type equals = idx(args, i)().find('=');
           bool has_arg;
           if (equals == string::npos)
             {
-              name = idx(args,i)().substr(2);
+              name = idx(args, i)().substr(2);
               has_arg = false;
             }
           else
             {
-              name = idx(args,i)().substr(2, equals-2);
+              name = idx(args, i)().substr(2, equals - 2);
               has_arg = true;
             }
 
           if (has_arg)
             {
-              arg = arg_type(idx(args,i)().substr(equals+1), origin::user);
+              arg = arg_type(idx(args, i)().substr(equals + 1), origin::user);
             }
         }
-      else if (idx(args,i)().substr(0,1) == "-")
+      else if (idx(args, i)().substr(0, 1) == "-")
         {
-          name = idx(args,i)().substr(1,1);
-          bool has_arg = idx(args,i)().size() > 2;
+          name = idx(args, i)().substr(1, 1);
+          bool has_arg = idx(args, i)().size() > 2;
 
           if (has_arg)
             {
-              arg = arg_type(idx(args,i)().substr(2), origin::user);
+              arg = arg_type(idx(args, i)().substr(2), origin::user);
             }
         }
       else
@@ -225,7 +225,7 @@ CMD_AUTOMATE_NO_STDIO(remote,
       W(F("no database given; assuming '%s' database. This means that\n"
           "we can't verify the server key, because we have no record of\n"
           "what it should be.")
-          % memory_db_identifier);
+        % memory_db_identifier);
       app.opts.dbname_type = memory_db;
       app.opts.dbname_given = true;
     }
@@ -246,7 +246,7 @@ CMD_AUTOMATE_NO_STDIO(remote,
   if (opts.size() > 0)
     {
       ss << 'o';
-      for (unsigned int i=0; i < opts.size(); ++i)
+      for (unsigned int i = 0; i < opts.size(); ++i)
         {
           ss << opts.at(i).first.size()  << ':' << opts.at(i).first;
           ss << opts.at(i).second().size() << ':' << opts.at(i).second();
@@ -255,11 +255,11 @@ CMD_AUTOMATE_NO_STDIO(remote,
     }
 
   ss << 'l';
-  for (args_vector::size_type i=0; i<cleaned_args.size(); ++i)
-  {
+  for (args_vector::size_type i = 0; i < cleaned_args.size(); ++i)
+    {
       std::string arg = idx(cleaned_args, i)();
       ss << arg.size() << ':' << arg;
-  }
+    }
   ss << 'e';
 
   L(FL("stdio input: %s") % ss.str());
@@ -291,28 +291,28 @@ print_dryrun_info_cmd(protocol_role role
       if (counts->keys_in.can_have_more_than_min)
         {
           std::cout << (F("would receive %d revisions, %d certs, and at least %d keys\n")
-            % counts->revs_in.min_count
-            % counts->certs_in.min_count
-            % counts->keys_in.min_count);
+                        % counts->revs_in.min_count
+                        % counts->certs_in.min_count
+                        % counts->keys_in.min_count);
         }
       else
         {
           std::cout << (F("would receive %d revisions, %d certs, and %d keys\n")
-            % counts->revs_in.min_count
-            % counts->certs_in.min_count
-            % counts->keys_in.min_count);
+                        % counts->revs_in.min_count
+                        % counts->certs_in.min_count
+                        % counts->keys_in.min_count);
         }
     }
   if (role != sink_role)
     {
       std::cout << (F("would send %d certs and %d keys\n")
-        % counts->certs_out.min_count
-        % counts->keys_out.min_count);
+                    % counts->certs_out.min_count
+                    % counts->keys_out.min_count);
       std::cout <<
-        (FP("would send %d revisions\n", // 0 revisions; nothing following, so no trailing colon
-           "would send %d revisions:\n",
-           counts->revs_out.min_count + 1)
-        % counts->revs_out.min_count);
+                (FP("would send %d revisions\n", // 0 revisions; nothing following, so no trailing colon
+                    "would send %d revisions:\n",
+                    counts->revs_out.min_count + 1)
+                 % counts->revs_out.min_count);
       map<branch_name, int> branch_counts;
       for (vector<revision_id>::const_iterator i = counts->revs_out.items.begin();
            i != counts->revs_out.items.end(); ++i)
@@ -704,11 +704,11 @@ CMD_AUTOMATE(sync, N_("[URI]\n[ADDRESS[:
                                  netsync_connection, args, info);
 
   if (app.opts.set_default && workspace::found)
-  {
-    // Write workspace options, including key; this is the simplest way to
-    // fix a "found multiple keys" error reported by sync.
-    workspace::set_options(app.opts, app.lua);
-  }
+    {
+      // Write workspace options, including key; this is the simplest way to
+      // fix a "found multiple keys" error reported by sync.
+      workspace::set_options(app.opts, app.lua);
+    }
 
   shared_conn_counts counts = connection_counts::create();
   run_netsync_protocol(app, app.opts, app.lua, project, keys,
@@ -762,7 +762,7 @@ CMD_NO_WORKSPACE(clone, "clone", "", CMD
   arg_type server = idx(args, 0);
   arg_type workspace_arg;
 
-   if (url_arg)
+  if (url_arg)
     {
       E(!app.opts.exclude_given, origin::user,
         F("cannot use '--exclude' in URI mode"));
@@ -821,16 +821,16 @@ CMD_NO_WORKSPACE(clone, "clone", "", CMD
   if (!target_is_current_dir)
     {
       require_path_is_nonexistent
-        (workspace_dir,
-         F("clone destination directory '%s' already exists")
-         % workspace_dir);
+      (workspace_dir,
+       F("clone destination directory '%s' already exists")
+       % workspace_dir);
     }
 
   system_path _MTN_dir = workspace_dir / path_component("_MTN");
 
   require_path_is_nonexistent
-    (_MTN_dir, F("bookkeeping directory already exists in '%s'")
-     % workspace_dir);
+  (_MTN_dir, F("bookkeeping directory already exists in '%s'")
+   % workspace_dir);
 
   directory_cleanup_helper remove_on_fail(
     target_is_current_dir ? _MTN_dir : workspace_dir
@@ -883,7 +883,7 @@ CMD_NO_WORKSPACE(clone, "clone", "", CMD
       E(project.revision_is_in_branch(ident, app.opts.branch),
         origin::user,
         F("revision %s is not a member of branch '%s'")
-          % ident % app.opts.branch);
+        % ident % app.opts.branch);
     }
 
   roster_t empty_roster, current_roster;
@@ -928,10 +928,11 @@ struct pid_file
       return;
     pid_t pid;
     ifstream(path.as_external().c_str()) >> pid;
-    if (pid == get_process_id()) {
-      file.close();
-      delete_file(path);
-    }
+    if (pid == get_process_id())
+      {
+        file.close();
+        delete_file(path);
+      }
   }
 
 private:
============================================================
--- src/cmd_othervcs.cc	1b4be44b0e712375f9883e710a833dfac840c9d8
+++ src/cmd_othervcs.cc	6f1e87b6e3a4299dcbd516048255af97ffd2b7b1
@@ -112,7 +112,7 @@ CMD(git_export, "git_export", "", CMD_RE
 
   // remove marked revs from the set to be exported
   for (map<revision_id, size_t>::const_iterator
-         i = marked_revs.begin(); i != marked_revs.end(); ++i)
+       i = marked_revs.begin(); i != marked_revs.end(); ++i)
     revision_set.erase(i->first);
 
   vector<revision_id> revisions;
============================================================
--- src/cmd_packet.cc	12adecc7e55564b04e6a1a0541600d4fd170e8a8
+++ src/cmd_packet.cc	ca92628d9c4bc16e61fe562ab06234ba34bbc66e
@@ -76,7 +76,7 @@ CMD_AUTOMATE(put_public_key, N_("KEY-PAC
   key_store keys(app);
   key_packet_db_writer dbw(db, keys);
 
-  istringstream ss(idx(args,0)());
+  istringstream ss(idx(args, 0)());
   read_key_packets(ss, dbw);
 }
 
@@ -126,9 +126,9 @@ CMD_AUTOMATE(get_public_key, N_("KEY_NAM
 }
 
 CMD_AUTOMATE(get_public_key, N_("KEY_NAME_OR_HASH"),
-    N_("Prints a public key packet"),
-    "",
-    options::opts::none)
+             N_("Prints a public key packet"),
+             "",
+             options::opts::none)
 {
   E(args.size() == 1, origin::user,
     F("wrong argument count"));
@@ -257,7 +257,7 @@ CMD_AUTOMATE(read_packets, N_("PACKET-DA
   key_store keys(app);
   packet_db_writer dbw(db, keys);
 
-  istringstream ss(idx(args,0)());
+  istringstream ss(idx(args, 0)());
   read_packets(ss, dbw);
 }
 
============================================================
--- src/cmd_ws_commit.cc	d9dbacb820c6d070c4952ee2b0f143e61e85631e
+++ src/cmd_ws_commit.cc	6be2215b9897c62bc50655979079b35cd0dbf91b
@@ -92,9 +92,9 @@ public:
 
     size_t len = eol - offset;
     string line = message.substr(offset, len);
-    offset = eol+1;
+    offset = eol + 1;
 
-    if (message[eol] == '\r' && message.length() > eol+1 &&
+    if (message[eol] == '\r' && message.length() > eol + 1 &&
         message[eol+1] == '\n')
       offset++;
 
@@ -671,7 +671,7 @@ CMD(disapprove, "disapprove", "", CMD_RE
 
   edge_entry const & old_edge (*rev.edges.begin());
   db.get_revision_manifest(edge_old_revision(old_edge),
-                               rev_inverse.new_manifest);
+                           rev_inverse.new_manifest);
   {
     roster_t old_roster, new_roster;
     db.get_roster(edge_old_revision(old_edge), old_roster);
@@ -698,11 +698,12 @@ CMD(disapprove, "disapprove", "", CMD_RE
 
   project.get_branch_heads(app.opts.branch, heads,
                            app.opts.ignore_suspend_certs);
-  if (heads.size() > old_head_size && old_head_size > 0) {
-    P(F("note: this revision creates divergence\n"
-        "note: you may (or may not) wish to run '%s merge'")
-      % prog_name);
-  }
+  if (heads.size() > old_head_size && old_head_size > 0)
+    {
+      P(F("note: this revision creates divergence\n"
+          "note: you may (or may not) wish to run '%s merge'")
+        % prog_name);
+    }
   updater.maybe_do_update();
 }
 
@@ -725,7 +726,7 @@ CMD(mkdir, "mkdir", "", CMD_REF(workspac
     {
       file_path fp = file_path_external(*i);
       require_path_is_nonexistent
-        (fp, F("directory '%s' already exists") % fp);
+      (fp, F("directory '%s' already exists") % fp);
 
       // we'll treat this as a user (fatal) error.  it really wouldn't make
       // sense to add a dir to .mtn-ignore and then try to add it to the
@@ -765,7 +766,7 @@ void perform_add(app_state & app,
       work.find_unknown_and_ignored(db, mask, roots, paths, ignored);
 
       work.perform_additions(db, ignored,
-                                 add_recursive, !app.opts.no_ignore);
+                             add_recursive, !app.opts.no_ignore);
     }
   else
     paths = set<file_path>(roots.begin(), roots.end());
@@ -813,7 +814,7 @@ void perform_drop(app_state & app,
     }
 
   work.perform_deletions(db, paths,
-                             app.opts.recursive, app.opts.bookkeep_only);
+                         app.opts.recursive, app.opts.bookkeep_only);
 }
 CMD(drop, "drop", "rm", CMD_REF(workspace), N_("[PATH]..."),
     N_("Drops files from the workspace"),
@@ -847,7 +848,7 @@ CMD(rename, "rename", "mv", CMD_REF(work
   file_path dst_path = file_path_external(dstr);
 
   set<file_path> src_paths;
-  for (size_t i = 0; i < args.size()-1; i++)
+  for (size_t i = 0; i < args.size() - 1; i++)
     {
       file_path s = file_path_external(idx(args, i));
       src_paths.insert(s);
@@ -1081,7 +1082,7 @@ checkout_common(app_state & app,
 
     if (!checkout_dot)
       require_path_is_nonexistent
-        (dir, F("checkout directory '%s' already exists") % dir);
+      (dir, F("checkout directory '%s' already exists") % dir);
   }
 
   workspace::create_workspace(app.opts, app.lua, dir);
@@ -1123,12 +1124,12 @@ CMD_AUTOMATE(checkout, N_("[DIRECTORY]")
 }
 
 CMD_AUTOMATE(checkout, N_("[DIRECTORY]"),
-    N_("Checks out a revision from the database into a directory"),
-    N_("If a revision is given, that's the one that will be checked out.  "
-       "Otherwise, it will be the head of the branch (given or implicit).  "
-       "If no directory is given, the branch name will be used as directory."),
-    options::opts::branch | options::opts::revision |
-    options::opts::move_conflicting_paths)
+             N_("Checks out a revision from the database into a directory"),
+             N_("If a revision is given, that's the one that will be checked out.  "
+                "Otherwise, it will be the head of the branch (given or implicit).  "
+                "If no directory is given, the branch name will be used as directory."),
+             options::opts::branch | options::opts::revision |
+             options::opts::move_conflicting_paths)
 {
   E(args.size() < 2, origin::user,
     F("wrong argument count"));
@@ -1345,7 +1346,7 @@ CMD_AUTOMATE(get_attributes, N_("PATH"),
   workspace work(app);
 
   // retrieve the path
-  file_path path = file_path_external(idx(args,0));
+  file_path path = file_path_external(idx(args, 0));
 
   roster_t base, current;
   parent_map parents;
@@ -1368,70 +1369,70 @@ CMD_AUTOMATE(get_attributes, N_("PATH"),
   const_node_t n = current.get_node(path);
   for (attr_map_t::const_iterator i = n->attrs.begin();
        i != n->attrs.end(); ++i)
-  {
-    std::string value(i->second.second());
-    std::string state;
+    {
+      std::string value(i->second.second());
+      std::string state;
 
-    // if if the first value of the value pair is false this marks a
-    // dropped attribute
-    if (!i->second.first)
-      {
-        // if the attribute is dropped, we should have a base roster
-        // with that node. we need to check that for the attribute as well
-        // because if it is dropped there as well it was already deleted
-        // in any previous revision
-        I(base.has_node(path));
+      // if if the first value of the value pair is false this marks a
+      // dropped attribute
+      if (!i->second.first)
+        {
+          // if the attribute is dropped, we should have a base roster
+          // with that node. we need to check that for the attribute as well
+          // because if it is dropped there as well it was already deleted
+          // in any previous revision
+          I(base.has_node(path));
 
-        const_node_t prev_node = base.get_node(path);
+          const_node_t prev_node = base.get_node(path);
 
-        // find the attribute in there
-        attr_map_t::const_iterator j = prev_node->attrs.find(i->first);
-        I(j != prev_node->attrs.end());
+          // find the attribute in there
+          attr_map_t::const_iterator j = prev_node->attrs.find(i->first);
+          I(j != prev_node->attrs.end());
 
-        // was this dropped before? then ignore it
-        if (!j->second.first) { continue; }
+          // was this dropped before? then ignore it
+          if (!j->second.first) { continue; }
 
-        state = "dropped";
-        // output the previous (dropped) value later
-        value = j->second.second();
-      }
-    // this marks either a new or an existing attribute
-    else
-      {
-        if (base.has_node(path))
-          {
-            const_node_t prev_node = base.get_node(path);
-            attr_map_t::const_iterator j =
-              prev_node->attrs.find(i->first);
+          state = "dropped";
+          // output the previous (dropped) value later
+          value = j->second.second();
+        }
+      // this marks either a new or an existing attribute
+      else
+        {
+          if (base.has_node(path))
+            {
+              const_node_t prev_node = base.get_node(path);
+              attr_map_t::const_iterator j =
+                prev_node->attrs.find(i->first);
 
-            // the attribute is new if it either hasn't been found
-            // in the previous roster or has been deleted there
-            if (j == prev_node->attrs.end() || !j->second.first)
-              {
-                state = "added";
-              }
-            // check if the attribute's value has been changed
-            else if (i->second.second() != j->second.second())
-              {
-                state = "changed";
-              }
-            else
-              {
-                state = "unchanged";
-              }
-          }
-        // its added since the whole node has been just added
-        else
-          {
-            state = "added";
-          }
-      }
+              // the attribute is new if it either hasn't been found
+              // in the previous roster or has been deleted there
+              if (j == prev_node->attrs.end() || !j->second.first)
+                {
+                  state = "added";
+                }
+              // check if the attribute's value has been changed
+              else if (i->second.second() != j->second.second())
+                {
+                  state = "changed";
+                }
+              else
+                {
+                  state = "unchanged";
+                }
+            }
+          // its added since the whole node has been just added
+          else
+            {
+              state = "added";
+            }
+        }
 
-    basic_io::stanza st;
-    st.push_str_triple(basic_io::syms::attr, i->first(), value);
-    st.push_str_pair(symbol("state"), state);
-    pr.print_stanza(st);
-  }
+      basic_io::stanza st;
+      st.push_str_triple(basic_io::syms::attr, i->first(), value);
+      st.push_str_pair(symbol("state"), state);
+      pr.print_stanza(st);
+    }
 
   // print the output
   output.write(pr.buf.data(), pr.buf.size());
@@ -1474,7 +1475,7 @@ CMD_AUTOMATE(drop_attribute, N_("PATH [K
              "",
              options::opts::none)
 {
-  E(args.size() ==1 || args.size() == 2, origin::user,
+  E(args.size() == 1 || args.size() == 2, origin::user,
     F("wrong argument count"));
 
   drop_attr(app, args);
@@ -1664,7 +1665,7 @@ void perform_commit(app_state & app,
             cset const & cs = edge_changes(edge);
 
             for (map<file_path, pair<file_id, file_id> >::const_iterator
-                   i = cs.deltas_applied.begin();
+                 i = cs.deltas_applied.begin();
                  i != cs.deltas_applied.end(); ++i)
               {
                 file_path path = i->first;
@@ -1697,8 +1698,8 @@ void perform_commit(app_state & app,
                     delta del;
                     diff(old_data.inner(), new_data, del);
                     db.put_file_version(old_content,
-                                            new_content,
-                                            file_delta(del));
+                                        new_content,
+                                        file_delta(del));
                   }
                 else
                   // If we don't err out here, the database will later.
@@ -1708,7 +1709,7 @@ void perform_commit(app_state & app,
               }
 
             for (map<file_path, file_id>::const_iterator
-                   i = cs.files_added.begin();
+                 i = cs.files_added.begin();
                  i != cs.files_added.end(); ++i)
               {
                 file_path path = i->first;
@@ -1767,11 +1768,12 @@ void perform_commit(app_state & app,
 
   project.get_branch_heads(app.opts.branch, heads,
                            app.opts.ignore_suspend_certs);
-  if (heads.size() > old_head_size && old_head_size > 0) {
-    P(F("note: this revision creates divergence\n"
-        "note: you may (or may not) wish to run '%s merge'")
-      % prog_name);
-  }
+  if (heads.size() > old_head_size && old_head_size > 0)
+    {
+      P(F("note: this revision creates divergence\n"
+          "note: you may (or may not) wish to run '%s merge'")
+        % prog_name);
+    }
 
   work.maybe_update_inodeprints(db);
 
@@ -1819,9 +1821,9 @@ CMD_NO_WORKSPACE(setup, "setup", "", CMD
 }
 
 CMD_NO_WORKSPACE(setup, "setup", "", CMD_REF(tree), N_("[DIRECTORY]"),
-    N_("Sets up a new workspace directory"),
-    N_("If no directory is specified, uses the current directory."),
-    options::opts::branch)
+                 N_("Sets up a new workspace directory"),
+                 N_("If no directory is specified, uses the current directory."),
+                 options::opts::branch)
 {
   if (args.size() > 1)
     throw usage(execid);
@@ -1831,16 +1833,16 @@ CMD_NO_WORKSPACE(setup, "setup", "", CMD
 
   string dir;
   if (args.size() == 1)
-      dir = idx(args,0)();
+    dir = idx(args, 0)();
   else
-      dir = ".";
+    dir = ".";
 
   system_path workspace_dir(dir, origin::user);
   system_path _MTN_dir(workspace_dir / bookkeeping_root_component);
 
   require_path_is_nonexistent
-    (_MTN_dir, F("bookkeeping directory already exists in '%s'")
-     % workspace_dir);
+  (_MTN_dir, F("bookkeeping directory already exists in '%s'")
+   % workspace_dir);
 
   // only try to remove the complete workspace directory
   // if we're about to create it anyways
@@ -1866,13 +1868,13 @@ CMD_NO_WORKSPACE(import, "import", "", C
 }
 
 CMD_NO_WORKSPACE(import, "import", "", CMD_REF(tree), N_("DIRECTORY"),
-  N_("Imports the contents of a directory into a branch"),
-  "",
-  options::opts::branch | options::opts::revision |
-  options::opts::messages |
-  options::opts::dryrun |
-  options::opts::no_ignore | options::opts::exclude |
-  options::opts::author | options::opts::date)
+                 N_("Imports the contents of a directory into a branch"),
+                 "",
+                 options::opts::branch | options::opts::revision |
+                 options::opts::messages |
+                 options::opts::dryrun |
+                 options::opts::no_ignore | options::opts::exclude |
+                 options::opts::author | options::opts::date)
 {
   revision_id ident;
   system_path dir;
@@ -1921,14 +1923,14 @@ CMD_NO_WORKSPACE(import, "import", "", C
 
   dir = system_path(idx(args, 0));
   require_path_is_directory
-    (dir,
-     F("import directory '%s' doesn't exists") % dir,
-     F("import directory '%s' is a file") % dir);
+  (dir,
+   F("import directory '%s' doesn't exists") % dir,
+   F("import directory '%s' is a file") % dir);
 
   system_path _MTN_dir = dir / path_component("_MTN");
 
   require_path_is_nonexistent
-    (_MTN_dir, F("bookkeeping directory already exists in '%s'") % dir);
+  (_MTN_dir, F("bookkeeping directory already exists in '%s'") % dir);
 
   directory_cleanup_helper remove_on_fail(_MTN_dir);
 
@@ -1961,9 +1963,9 @@ CMD_NO_WORKSPACE(import, "import", "", C
   // commit
   if (!app.opts.dryrun)
     {
-        perform_commit(app, db, work, project,
-                       make_command_id("workspace commit"),
-                       vector<file_path>());
+      perform_commit(app, db, work, project,
+                     make_command_id("workspace commit"),
+                     vector<file_path>());
       remove_on_fail.commit();
     }
   else
@@ -1975,10 +1977,10 @@ CMD_NO_WORKSPACE(migrate_workspace, "mig
 }
 
 CMD_NO_WORKSPACE(migrate_workspace, "migrate_workspace", "", CMD_REF(tree),
-  N_("[DIRECTORY]"),
-  N_("Migrates a workspace directory's metadata to the latest format"),
-  N_("If no directory is given, defaults to the current workspace."),
-  options::opts::none)
+                 N_("[DIRECTORY]"),
+                 N_("Migrates a workspace directory's metadata to the latest format"),
+                 N_("If no directory is given, defaults to the current workspace."),
+                 options::opts::none)
 {
   if (args.size() > 1)
     throw usage(execid);
@@ -2252,8 +2254,8 @@ operator<<(std::ostream & os,
       // update command to rerun a selection and update based on current
       // bisect information
       I(false);
-    break;
-  }
+      break;
+    }
   return os;
 }
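
For reference, the "added"/"changed"/"unchanged" decision reindented in the
hunk above reduces to the following standalone sketch; the types here are
simplified stand-ins, not monotone's roster_t/attr_map_t API.

  #include <map>
  #include <string>
  #include <utility>

  // Attribute map keyed by name; the bool mirrors attr_map_t's "live" flag
  // (false means the attribute was deleted in the base roster).
  typedef std::map<std::string, std::pair<bool, std::string> > attrs_t;

  // node_in_base: whether the node itself existed in the base roster.
  std::string classify_attr_state(bool node_in_base,
                                  attrs_t const & base_attrs,
                                  std::string const & key,
                                  std::string const & new_value)
  {
    if (!node_in_base)
      return "added";                    // the whole node is new
    attrs_t::const_iterator j = base_attrs.find(key);
    if (j == base_attrs.end() || !j->second.first)
      return "added";                    // absent or deleted in the base roster
    if (new_value != j->second.second)
      return "changed";                  // value differs from the base roster
    return "unchanged";
  }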
 
============================================================
--- src/netxx_pipe.cc	ca933b395822bb54de954e82fce6e1f012037cf9
+++ src/netxx_pipe.cc	a823fe1d4f1171e07d596ae58852a42907a89d39
@@ -35,7 +35,7 @@ Netxx::PipeStream::PipeStream(int _readf
 using std::strerror;
 
 Netxx::PipeStream::PipeStream(int _readfd, int _writefd)
-    :
+  :
 #ifdef WIN32
   child(INVALID_HANDLE_VALUE),
   bytes_available(0),
@@ -174,11 +174,11 @@ Netxx::PipeStream::PipeStream (const str
   // a vector<string> as argument.
 
   const unsigned newsize = 64;
-  const char *newargv[newsize];
+  const char * newargv[newsize];
   I(args.size() < (sizeof(newargv) / sizeof(newargv[0])));
 
   unsigned newargc = 0;
-  newargv[newargc++]=cmd.c_str();
+  newargv[newargc++] = cmd.c_str();
   for (vector<string>::const_iterator i = args.begin();
        i != args.end(); ++i)
     newargv[newargc++] = i->c_str();
@@ -195,8 +195,8 @@ Netxx::PipeStream::PipeStream (const str
 
   static unsigned long serial = 0;
   string pipename = (FL("\\\\.\\pipe\\netxx_pipe_%ld_%d")
-                          % GetCurrentProcessId()
-                          % (++serial)).str();
+                     % GetCurrentProcessId()
+                     % (++serial)).str();
 
   // Create the parent's handle to the named pipe.
 
@@ -216,15 +216,15 @@ Netxx::PipeStream::PipeStream (const str
   // Open the child's handle to the named pipe.
 
   SECURITY_ATTRIBUTES inherit;
-  memset(&inherit,0,sizeof inherit);
-  inherit.nLength=sizeof inherit;
+  memset(&inherit, 0, sizeof inherit);
+  inherit.nLength = sizeof inherit;
   inherit.bInheritHandle = TRUE;
 
   HANDLE hpipe = CreateFile(pipename.c_str(),
-                            GENERIC_READ|GENERIC_WRITE, 0,
+                            GENERIC_READ | GENERIC_WRITE, 0,
                             &inherit,
                             OPEN_EXISTING,
-                            FILE_ATTRIBUTE_NORMAL|FILE_FLAG_OVERLAPPED,0);
+                            FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED, 0);
 
   E(hpipe != INVALID_HANDLE_VALUE, origin::system,
     F("CreateFile(%s,...) call failed: %s")
@@ -248,7 +248,7 @@ Netxx::PipeStream::PipeStream (const str
   L(FL("Subprocess command line: '%s'") % cmdline);
 
   BOOL started = CreateProcess(NULL, // Application name
-                               const_cast<CHAR*>(cmdline.c_str()),
+                               const_cast<CHAR *>(cmdline.c_str()),
                                NULL, // Process attributes
                                NULL, // Thread attributes
                                TRUE, // Inherit handles
@@ -293,7 +293,7 @@ Netxx::signed_size_type
 // Non blocking read.
 
 Netxx::signed_size_type
-Netxx::PipeStream::read (void *buffer, size_type length)
+Netxx::PipeStream::read (void * buffer, size_type length)
 {
 #ifdef WIN32
 
@@ -304,7 +304,7 @@ Netxx::PipeStream::read (void *buffer, s
     {
       memcpy(buffer, readbuf, length);
       if (length < bytes_available)
-        memmove(readbuf, readbuf+length, bytes_available-length);
+        memmove(readbuf, readbuf + length, bytes_available - length);
       bytes_available -= length;
     }
 
@@ -315,7 +315,7 @@ Netxx::signed_size_type
 }
 
 Netxx::signed_size_type
-Netxx::PipeStream::write(const void *buffer, size_type length)
+Netxx::PipeStream::write(const void * buffer, size_type length)
 {
 #ifdef WIN32
   DWORD written = 0;
@@ -353,7 +353,7 @@ Netxx::PipeStream::close (void)
   writefd = -1;
 
   if (child)
-    while (waitpid(child,0,0) == -1 && errno == EINTR) ;
+    while (waitpid(child, 0, 0) == -1 && errno == EINTR) ;
   child = 0;
 #endif
 }
@@ -379,17 +379,18 @@ status_name(DWORD wstatus)
 static string
 status_name(DWORD wstatus)
 {
-  switch (wstatus) {
-  case WAIT_TIMEOUT: return "WAIT_TIMEOUT";
-  case WAIT_OBJECT_0: return "WAIT_OBJECT_0";
-  case WAIT_FAILED: return "WAIT_FAILED";
-  case WAIT_OBJECT_0+1: return "WAIT_OBJECT_0+1";
-  default: return "UNKNOWN";
-  }
+  switch (wstatus)
+    {
+    case WAIT_TIMEOUT: return "WAIT_TIMEOUT";
+    case WAIT_OBJECT_0: return "WAIT_OBJECT_0";
+    case WAIT_FAILED: return "WAIT_FAILED";
+    case WAIT_OBJECT_0+1: return "WAIT_OBJECT_0+1";
+    default: return "UNKNOWN";
+    }
 }
 
 Netxx::Probe::result_type
-Netxx::PipeCompatibleProbe::ready(const Timeout &timeout, ready_type rt)
+Netxx::PipeCompatibleProbe::ready(const Timeout & timeout, ready_type rt)
 {
   if (!is_pipe)
     return Probe::ready(timeout, rt);
@@ -492,7 +493,7 @@ void
 }
 
 void
-Netxx::PipeCompatibleProbe::add(PipeStream &ps, ready_type rt)
+Netxx::PipeCompatibleProbe::add(PipeStream & ps, ready_type rt)
 {
   assert(!is_pipe);
   assert(!pipe);
@@ -502,7 +503,7 @@ void
 }
 
 void
-Netxx::PipeCompatibleProbe::add(StreamBase const &sb, ready_type rt)
+Netxx::PipeCompatibleProbe::add(StreamBase const & sb, ready_type rt)
 {
   // FIXME: This is *still* an unfortunate way of performing a
   // downcast, though slightly less awful than the old way, which
@@ -510,50 +511,50 @@ Netxx::PipeCompatibleProbe::add(StreamBa
   //
   // Perhaps we should twiddle the caller-visible API.
 
-  StreamBase const *sbp = &sb;
-  PipeStream const *psp = dynamic_cast<PipeStream const *>(sbp);
+  StreamBase const * sbp = &sb;
+  PipeStream const * psp = dynamic_cast<PipeStream const *>(sbp);
   if (psp)
-    add(const_cast<PipeStream&>(*psp),rt);
+    add(const_cast<PipeStream &>(*psp), rt);
   else
     {
       assert(!is_pipe);
-      Probe::add(sb,rt);
+      Probe::add(sb, rt);
     }
 }
 
 void
-Netxx::PipeCompatibleProbe::add(const StreamServer &ss, ready_type rt)
+Netxx::PipeCompatibleProbe::add(const StreamServer & ss, ready_type rt)
 {
   assert(!is_pipe);
-  Probe::add(ss,rt);
+  Probe::add(ss, rt);
 }
 #else // unix
 void
-Netxx::PipeCompatibleProbe::add(PipeStream &ps, ready_type rt)
-  {
-    if (rt == ready_none || rt & ready_read)
-      add_socket(ps.get_readfd(), ready_read);
-    if (rt == ready_none || rt & ready_write)
-      add_socket(ps.get_writefd(), ready_write);
-  }
+Netxx::PipeCompatibleProbe::add(PipeStream & ps, ready_type rt)
+{
+  if (rt == ready_none || rt & ready_read)
+    add_socket(ps.get_readfd(), ready_read);
+  if (rt == ready_none || rt & ready_write)
+    add_socket(ps.get_writefd(), ready_write);
+}
 
 void
-Netxx::PipeCompatibleProbe::add(const StreamBase &sb, ready_type rt)
+Netxx::PipeCompatibleProbe::add(const StreamBase & sb, ready_type rt)
 {
   try
     {
-      add(const_cast<PipeStream&>(dynamic_cast<const PipeStream&>(sb)),rt);
+      add(const_cast<PipeStream &>(dynamic_cast<const PipeStream &>(sb)), rt);
     }
   catch (...)
     {
-      Probe::add(sb,rt);
+      Probe::add(sb, rt);
     }
 }
 
 void
-Netxx::PipeCompatibleProbe::add(const StreamServer &ss, ready_type rt)
+Netxx::PipeCompatibleProbe::add(const StreamServer & ss, ready_type rt)
 {
-  Probe::add(ss,rt);
+  Probe::add(ss, rt);
 }
 #endif
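
The Win32 overload of PipeCompatibleProbe::add() above decides between the
pipe and generic code paths with a pointer-form dynamic_cast, while the unix
variant uses the throwing reference form.  A minimal standalone analogue of
the pointer-based dispatch (Stream/PipeLike/handle_* are illustrative names,
not Netxx API):

  #include <iostream>

  struct Stream { virtual ~Stream() {} };
  struct PipeLike : Stream {};

  void handle_pipe(PipeLike &)  { std::cout << "pipe path\n"; }
  void handle_generic(Stream &) { std::cout << "generic path\n"; }

  void add(Stream & sb)
  {
    // The pointer cast yields NULL instead of throwing, so the fallback is
    // an ordinary if/else rather than a catch block.
    if (PipeLike * p = dynamic_cast<PipeLike *>(&sb))
      handle_pipe(*p);
    else
      handle_generic(sb);
  }

  int main()
  {
    PipeLike p;
    Stream s;
    add(p);   // "pipe path"
    add(s);   // "generic path"
    return 0;
  }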
 
============================================================
--- src/netxx_pipe.hh	f0b5ed2e06bcfd5dce87d3a5b59d0ec145c3228c
+++ src/netxx_pipe.hh	aada41549bd6b3a922c6ea62fbf1fc1eec655fd1
@@ -41,93 +41,93 @@ namespace Netxx
 */
 
 namespace Netxx
-  {
+{
   class PipeCompatibleProbe;
   class StreamServer;
 
   class PipeStream : public StreamBase
-    {
+  {
 #ifdef WIN32
-      HANDLE named_pipe;
-      HANDLE child;
-      char readbuf[1024];
-      DWORD bytes_available;
-      bool read_in_progress;
-      OVERLAPPED overlap;
-      friend class PipeCompatibleProbe;
+    HANDLE named_pipe;
+    HANDLE child;
+    char readbuf[1024];
+    DWORD bytes_available;
+    bool read_in_progress;
+    OVERLAPPED overlap;
+    friend class PipeCompatibleProbe;
 #else
-      int readfd, writefd;
-      int child;
+    int readfd, writefd;
+    int child;
 #endif
 
 
-    public:
-      // do we need Timeout for symmetry with Stream?
-      explicit PipeStream (int readfd, int writefd);
-      explicit PipeStream (const std::string &cmd, const std::vector<std::string> &args);
-      virtual ~PipeStream() { close(); }
-      virtual signed_size_type read (void *buffer, size_type length);
-      virtual signed_size_type write (const void *buffer, size_type length);
-      virtual void close (void);
-      virtual socket_type get_socketfd (void) const;
-      virtual const ProbeInfo* get_probe_info (void) const;
-      int get_readfd(void) const
-        {
+  public:
+    // do we need Timeout for symmetry with Stream?
+    explicit PipeStream (int readfd, int writefd);
+    explicit PipeStream (const std::string & cmd, const std::vector<std::string> &args);
+    virtual ~PipeStream() { close(); }
+    virtual signed_size_type read (void * buffer, size_type length);
+    virtual signed_size_type write (const void * buffer, size_type length);
+    virtual void close (void);
+    virtual socket_type get_socketfd (void) const;
+    virtual const ProbeInfo * get_probe_info (void) const;
+    int get_readfd(void) const
+    {
 #ifdef WIN32
-          return -1;
+      return -1;
 #else
-          return readfd;
+      return readfd;
 #endif
-        }
-      int get_writefd(void) const
-        {
+    }
+    int get_writefd(void) const
+    {
 #ifdef WIN32
-          return -1;
+      return -1;
 #else
-          return writefd;
+      return writefd;
 #endif
-        }
-    };
+    }
+  };
 
 #ifdef WIN32
 
   // This probe can either handle _one_ PipeStream or several network
   // Streams so if !is_pipe this acts like a Probe.
   class PipeCompatibleProbe : public Probe
+  {
+    bool is_pipe;
+    // only meaningful if is_pipe is true
+    PipeStream * pipe;
+    ready_type ready_t;
+  public:
+    PipeCompatibleProbe() : is_pipe(), pipe(), ready_t()
+    {}
+    void clear()
     {
-      bool is_pipe;
-      // only meaningful if is_pipe is true
-      PipeStream *pipe;
-      ready_type ready_t;
-    public:
-      PipeCompatibleProbe() : is_pipe(), pipe(), ready_t()
-      {}
-      void clear()
-      {
-        if (is_pipe)
-          {
-            pipe=0;
-            is_pipe=false;
-          }
-        else
-          Probe::clear();
-      }
-      // This function does all the hard work (emulating a select).
-      result_type ready(const Timeout &timeout=Timeout(), ready_type rt=ready_none);
-      void add(PipeStream &ps, ready_type rt=ready_none);
-      void add(const StreamBase &sb, ready_type rt=ready_none);
-      void add(const StreamServer &ss, ready_type rt=ready_none);
-    };
+      if (is_pipe)
+        {
+          pipe = 0;
+          is_pipe = false;
+        }
+      else
+        Probe::clear();
+    }
+    // This function does all the hard work (emulating a select).
+    result_type ready(const Timeout & timeout = Timeout(), ready_type rt = ready_none);
+    void add(PipeStream & ps, ready_type rt = ready_none);
+    void add(const StreamBase & sb, ready_type rt = ready_none);
+    void add(const StreamServer & ss, ready_type rt = ready_none);
+  };
 #else
 
   // We only act specially if a PipeStream is added (directly or via
   // the StreamBase parent reference).
   struct PipeCompatibleProbe : Probe
-    {
-      void add(PipeStream &ps, ready_type rt=ready_none);
-      void add(const StreamBase &sb, ready_type rt=ready_none);
-      void add(const StreamServer &ss, ready_type rt=ready_none);
-    };
+  {
+    void add(PipeStream & ps, ready_type rt = ready_none);
+    void add(const StreamBase & sb, ready_type rt = ready_none);
+    void add(const StreamServer & ss, ready_type rt = ready_none);
+  };
 #endif
 
 }
============================================================
--- src/uri.cc	f50e7599f6294a9872c055a8b230ea4933bdcc82
+++ src/uri.cc	ceac570deba6cb715b8a8f6ffea5ec92ce159fb5
@@ -78,8 +78,7 @@ parse_uri(string const & in, uri_t & uri
           uri.host.assign(hostlike_matches[4]);
 
         }
-      else
-      if (!hostlike_matches[5].empty())
+      else if (!hostlike_matches[5].empty())
         {
           // for IPv6 we discard the square brackets
           uri.host.assign(hostlike_matches[5]);
@@ -140,8 +139,7 @@ urldecode(string const & in, origin::typ
     {
       if (*i == '+')
         out += ' ';
-      else
-      if (*i != '%')
+      else if (*i != '%')
         out += *i;
       else
         {
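
The urldecode() loop above keeps '+' and plain characters on the fast path
and falls through to percent decoding.  A minimal sketch of that loop; the
'%XX' branch below is the conventional hex decode and is an assumption here,
since that part of the function is outside the visible context:

  #include <cctype>
  #include <cstdlib>
  #include <string>

  std::string urldecode_sketch(std::string const & in)
  {
    std::string out;
    for (std::string::size_type i = 0; i < in.size(); ++i)
      {
        if (in[i] == '+')
          out += ' ';
        else if (in[i] != '%')
          out += in[i];
        else if (i + 2 < in.size()
                 && std::isxdigit(static_cast<unsigned char>(in[i + 1]))
                 && std::isxdigit(static_cast<unsigned char>(in[i + 2])))
          {
            out += static_cast<char>(
              std::strtol(in.substr(i + 1, 2).c_str(), 0, 16));
            i += 2;
          }
        // else: malformed escape; behaviour of the real code is not shown here
      }
    return out;
  }
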
============================================================
--- src/lua.cc	40d590a9716e281f2004c1b5eb19c2b0926e7dde
+++ src/lua.cc	be3fdb610d743d62a3cc80d47b9332f86cbfe430
@@ -39,30 +39,32 @@ dump_stack(lua_State * st)
   string out;
   int i;
   int top = lua_gettop(st);
-  for (i = 1; i <= top; i++) {  /* repeat for each level */
-    int t = lua_type(st, i);
-    switch (t) {
-    case LUA_TSTRING:  /* strings */
-      out += '`';
-      out += string(lua_tostring(st, i), lua_strlen(st, i));
-      out += '\'';
-      break;
+  for (i = 1; i <= top; i++)    /* repeat for each level */
+    {
+      int t = lua_type(st, i);
+      switch (t)
+        {
+        case LUA_TSTRING:  /* strings */
+          out += '`';
+          out += string(lua_tostring(st, i), lua_strlen(st, i));
+          out += '\'';
+          break;
 
-    case LUA_TBOOLEAN:  /* booleans */
-      out += (lua_toboolean(st, i) ? "true" : "false");
-      break;
+        case LUA_TBOOLEAN:  /* booleans */
+          out += (lua_toboolean(st, i) ? "true" : "false");
+          break;
 
-    case LUA_TNUMBER:  /* numbers */
-      out += (FL("%g") % lua_tonumber(st, i)).str();
-      break;
+        case LUA_TNUMBER:  /* numbers */
+          out += (FL("%g") % lua_tonumber(st, i)).str();
+          break;
 
-    default:  /* other values */
-      out += std::string(lua_typename(st, t));
-      break;
+        default:  /* other values */
+          out += std::string(lua_typename(st, t));
+          break;
 
+        }
+      out += "  ";  /* put a separator */
     }
-    out += "  ";  /* put a separator */
-  }
   return out;
 }
 
@@ -358,7 +360,7 @@ Lua &
 }
 
 Lua &
-Lua::set_field(const string& key, int idx)
+Lua::set_field(const string & key, int idx)
 {
   if (failed) return *this;
   lua_setfield(st, idx, key.c_str());
@@ -480,7 +482,7 @@ LUAEXT(include, )
 
 LUAEXT(include, )
 {
-  const char *path = luaL_checkstring(LS, -1);
+  const char * path = luaL_checkstring(LS, -1);
   E(path, origin::user,
     F("%s called with an invalid parameter") % "Include");
 
@@ -492,7 +494,7 @@ LUAEXT(includedir, )
 
 LUAEXT(includedir, )
 {
-  const char *pathstr = luaL_checkstring(LS, -1);
+  const char * pathstr = luaL_checkstring(LS, -1);
   E(pathstr, origin::user,
     F("%s called with an invalid parameter") % "IncludeDir");
 
@@ -503,8 +505,8 @@ LUAEXT(includedirpattern, )
 
 LUAEXT(includedirpattern, )
 {
-  const char *pathstr = luaL_checkstring(LS, -2);
-  const char *pattern = luaL_checkstring(LS, -1);
+  const char * pathstr = luaL_checkstring(LS, -2);
+  const char * pattern = luaL_checkstring(LS, -1);
   E(pathstr && pattern, origin::user,
     F("%s called with an invalid parameter") % "IncludeDirPattern");
 
@@ -515,23 +517,26 @@ LUAEXT(search, regex)
 
 LUAEXT(search, regex)
 {
-  const char *re = luaL_checkstring(LS, -2);
-  const char *str = luaL_checkstring(LS, -1);
+  const char * re = luaL_checkstring(LS, -2);
+  const char * str = luaL_checkstring(LS, -1);
 
   bool result = false;
-  try {
-    result = pcre::regex(re, origin::user).match(str, origin::user);
-  } catch (recoverable_failure & e) {
-    lua_pushstring(LS, e.what());
-    return lua_error(LS);
-  }
+  try
+    {
+      result = pcre::regex(re, origin::user).match(str, origin::user);
+    }
+  catch (recoverable_failure & e)
+    {
+      lua_pushstring(LS, e.what());
+      return lua_error(LS);
+    }
   lua_pushboolean(LS, result);
   return 1;
 }
 
 LUAEXT(gettext, )
 {
-  const char *msgid = luaL_checkstring(LS, -1);
+  const char * msgid = luaL_checkstring(LS, -1);
   lua_pushstring(LS, gettext(msgid));
   return 1;
 }
@@ -543,7 +548,7 @@ run_string(lua_State * st, char const * 
   return
     Lua(st)
     .loadstring(str, identity)
-    .call(0,1)
+    .call(0, 1)
     .ok();
 }
 
@@ -554,7 +559,7 @@ run_file(lua_State * st, char const * fi
   return
     Lua(st)
     .loadfile(filename)
-    .call(0,1)
+    .call(0, 1)
     .ok();
 }
 
@@ -606,13 +611,13 @@ run_directory(lua_State * st, char const
   }
 
   sort(arr.begin(), arr.end());
-  for (vector<string>::iterator i= arr.begin(); i != arr.end(); ++i)
+  for (vector<string>::iterator i = arr.begin(); i != arr.end(); ++i)
     {
       L(FL("opening rcfile '%s'") % *i);
       bool res = Lua(st)
-        .loadfile(i->c_str())
-        .call(0,1)
-        .ok();
+                 .loadfile(i->c_str())
+                 .call(0, 1)
+                 .ok();
       E(res, origin::user, F("lua error while loading rcfile '%s'") % *i);
       L(FL("'%s' is ok") % *i);
     }
============================================================
--- src/lua.hh	146872f14cc23c6179c37bcda8667a0ecd178826
+++ src/lua.hh	9e644f9cd3750b68f9ea7d954428fd3ba7ac6b74
@@ -23,7 +23,7 @@ struct
 // destructed, so no need to pop values when you're done.
 
 struct
-Lua
+  Lua
 {
   lua_State * st;
   bool failed;
@@ -80,9 +80,9 @@ namespace luaext
 
 namespace luaext
 {
-  typedef std::map<std::string, int (*)(lua_State*)> fmap;
+  typedef std::map<std::string, int ( *)(lua_State *)> fmap;
   typedef std::map<std::string, fmap> ftmap;
-  extern ftmap *fns;
+  extern ftmap * fns;
   struct extfn
   {
     extfn(std::string const & name, std::string const & table,
============================================================
--- src/charset.cc	bb433fcc004c011696478d5c9d5626b2c72c78e7
+++ src/charset.cc	89295ac94fc9e2e8e8b3e8ac434da88e0ca86573
@@ -70,10 +70,10 @@ charset_convert(string const & src_chars
 
       E(converted != NULL, whence,
         F("failed to convert string from %s to %s: '%s'")
-         % src_charset % dst_charset % src);
+        % src_charset % dst_charset % src);
       dst = string(converted);
       if (converted != src.c_str())
-        free(const_cast<char*>(converted));
+        free(const_cast<char *>(converted));
     }
 }
 
@@ -255,55 +255,55 @@ utf8_validate(utf8 const & utf)
 
   for (string::const_iterator i = utf().begin();
        i != utf().end(); ++i, --left)
-  {
-    u8 c = *i;
-    if (c < 128)
-      continue;
-    if ((c & 0xe0) == 0xc0)
     {
-      if (left < 2)
-        return false;
-      if ((c & 0x1e) == 0)
-        return false;
-      ++i; --left; c = *i;
-      if ((c & 0xc0) != 0x80)
-        return false;
-    }
-    else
-    {
-      if ((c & 0xf0) == 0xe0)
-      {
-        if (left < 3)
-          return false;
-        min = 1 << 11;
-        val = c & 0x0f;
-        goto two_remaining;
-      }
-      else if ((c & 0xf8) == 0xf0)
-      {
-        if (left < 4)
-          return false;
-        min = 1 << 16;
-        val = c & 0x07;
-      }
+      u8 c = *i;
+      if (c < 128)
+        continue;
+      if ((c & 0xe0) == 0xc0)
+        {
+          if (left < 2)
+            return false;
+          if ((c & 0x1e) == 0)
+            return false;
+          ++i; --left; c = *i;
+          if ((c & 0xc0) != 0x80)
+            return false;
+        }
       else
-        return false;
-      ++i; --left; c = *i;
-      if (!utf8_consume_continuation_char(c, val))
-        return false;
+        {
+          if ((c & 0xf0) == 0xe0)
+            {
+              if (left < 3)
+                return false;
+              min = 1 << 11;
+              val = c & 0x0f;
+              goto two_remaining;
+            }
+          else if ((c & 0xf8) == 0xf0)
+            {
+              if (left < 4)
+                return false;
+              min = 1 << 16;
+              val = c & 0x07;
+            }
+          else
+            return false;
+          ++i; --left; c = *i;
+          if (!utf8_consume_continuation_char(c, val))
+            return false;
 two_remaining:
-      ++i; --left; c = *i;
-      if (!utf8_consume_continuation_char(c, val))
-        return false;
-      ++i; --left; c = *i;
-      if (!utf8_consume_continuation_char(c, val))
-        return false;
-      if (val < min)
-        return false;
-      if (!is_valid_unicode_char(val))
-        return false;
+          ++i; --left; c = *i;
+          if (!utf8_consume_continuation_char(c, val))
+            return false;
+          ++i; --left; c = *i;
+          if (!utf8_consume_continuation_char(c, val))
+            return false;
+          if (val < min)
+            return false;
+          if (!is_valid_unicode_char(val))
+            return false;
+        }
     }
-  }
   return true;
 }
 
@@ -330,7 +330,7 @@ ace_to_utf8(string const & a, utf8 & utf
 void
 ace_to_utf8(string const & a, utf8 & utf, origin::type whence)
 {
-  char *out = NULL;
+  char * out = NULL;
   L(FL("converting %d bytes from IDNA ACE to UTF-8") % a.size());
   int res = idna_to_unicode_8z8z(a.c_str(), &out, IDNA_USE_STD3_ASCII_RULES);
   E(res == IDNA_SUCCESS || res == IDNA_NO_ACE_PREFIX, whence,
@@ -344,7 +344,7 @@ utf8_to_ace(utf8 const & utf, string & a
 void
 utf8_to_ace(utf8 const & utf, string & a)
 {
-  char *out = NULL;
+  char * out = NULL;
   L(FL("converting %d bytes from UTF-8 to IDNA ACE") % utf().size());
   int res = idna_to_ascii_8z(utf().c_str(), &out, IDNA_USE_STD3_ASCII_RULES);
   E(res == IDNA_SUCCESS, utf.made_from,
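
The utf8_validate() body reindented earlier in this file's hunks is driven by
the standard UTF-8 byte patterns: the lead byte announces the sequence length
and every following byte must be a 10xxxxxx continuation byte.  A compact
sketch of just those checks (the overlong-encoding and code-point range tests
done by the real function are omitted):

  #include <string>

  inline int utf8_sequence_length(unsigned char c)
  {
    if (c < 0x80)           return 1;   // plain ASCII
    if ((c & 0xe0) == 0xc0) return 2;   // 110xxxxx
    if ((c & 0xf0) == 0xe0) return 3;   // 1110xxxx
    if ((c & 0xf8) == 0xf0) return 4;   // 11110xxx
    return 0;                           // invalid lead byte
  }

  inline bool is_utf8_continuation(unsigned char c)
  {
    return (c & 0xc0) == 0x80;          // 10xxxxxx
  }

  bool looks_like_utf8(std::string const & s)
  {
    for (std::string::size_type i = 0; i < s.size(); )
      {
        int len = utf8_sequence_length(static_cast<unsigned char>(s[i]));
        if (len == 0 || i + len > s.size())
          return false;
        for (int k = 1; k < len; ++k)
          if (!is_utf8_continuation(static_cast<unsigned char>(s[i + k])))
            return false;
        i += len;
      }
    return true;
  }
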
============================================================
--- src/simplestring_xform.cc	e779da92dc0c8e2f09d5800d26073d4d2ca5d130
+++ src/simplestring_xform.cc	bcf82fe5bf8eb104f827c247f5f4e84a4d66d5a1
@@ -25,7 +25,7 @@ struct
 using std::transform;
 
 struct
-lowerize
+  lowerize
 {
   char operator()(unsigned char const & c) const
   {
@@ -42,7 +42,7 @@ struct
 }
 
 struct
-upperize
+  upperize
 {
   char operator()(unsigned char const & c) const
   {
@@ -101,16 +101,16 @@ void split_into_lines(string const & in,
           string::size_type next_begin;
 
           if (in.at(end) == '\r'
-              && in.size() > end+1
-              && in.at(end+1) == '\n')
+              && in.size() > end + 1
+              && in.at(end + 1) == '\n')
             next_begin = end + 2;
           else
             next_begin = end + 1;
 
           if (flags & split_flags::keep_endings)
-            out.push_back(in.substr(begin, next_begin-begin));
+            out.push_back(in.substr(begin, next_begin - begin));
           else
-            out.push_back(in.substr(begin, end-begin));
+            out.push_back(in.substr(begin, end - begin));
 
           begin = next_begin;
 
@@ -118,16 +118,18 @@ void split_into_lines(string const & in,
             break;
           end = in.find_first_of("\r\n", begin);
         }
-      if (begin < in.size()) {
-        // special case: last line without trailing newline
-        string s = in.substr(begin, in.size() - begin);
-        if (flags & split_flags::diff_compat) {
-          // special handling: produce diff(1) compatible output
-          s += (in.find_first_of("\r") != string::npos ? "\r\n" : "\n");
-          s += "\\ No newline at end of file";
+      if (begin < in.size())
+        {
+          // special case: last line without trailing newline
+          string s = in.substr(begin, in.size() - begin);
+          if (flags & split_flags::diff_compat)
+            {
+              // special handling: produce diff(1) compatible output
+              s += (in.find_first_of("\r") != string::npos ? "\r\n" : "\n");
+              s += "\\ No newline at end of file";
+            }
+          out.push_back(s);
         }
-        out.push_back(s);
-      }
     }
   else
     {
@@ -237,7 +239,7 @@ trim_right(string const & s, string cons
   // characters then the entire string is made up of these characters
 
   pos = tmp.find_last_of(chars);
-  if (pos == tmp.size()-1)
+  if (pos == tmp.size() - 1)
     tmp = "";
 
   return tmp;
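
The split_into_lines() logic rebraced earlier in this file treats '\r', '\n'
and the pair "\r\n" as one line terminator and only keeps the terminator when
keep_endings is set.  A simplified standalone sketch of that splitting (the
diff(1)-compat "No newline at end of file" handling is left out):

  #include <string>
  #include <vector>

  void split_lines_sketch(std::string const & in,
                          std::vector<std::string> & out,
                          bool keep_endings)
  {
    std::string::size_type begin = 0;
    while (begin < in.size())
      {
        std::string::size_type end = in.find_first_of("\r\n", begin);
        if (end == std::string::npos)
          {
            out.push_back(in.substr(begin));  // last line, no trailing newline
            return;
          }
        std::string::size_type next = end + 1;
        if (in[end] == '\r' && next < in.size() && in[next] == '\n')
          ++next;                             // treat "\r\n" as one terminator
        out.push_back(in.substr(begin, (keep_endings ? next : end) - begin));
        begin = next;
      }
  }
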
============================================================
--- src/simplestring_xform.hh	ab0d125445f13982344bbe3f713e2ce90d11cb66
+++ src/simplestring_xform.hh	82ae3326bc09ab0f08ba6a424f1952cd9e7f1b53
@@ -17,7 +17,8 @@ namespace split_flags
 
 namespace split_flags
 {
-  enum split_flags {
+  enum split_flags
+  {
     none = 0,
     diff_compat = 1,
     keep_endings = 2
@@ -74,7 +75,7 @@ std::vector< T > split_into_words(T cons
 
   while (end != std::string::npos && end >= begin)
     {
-      out.push_back(from_string<T>(instr.substr(begin, end-begin),
+      out.push_back(from_string<T>(instr.substr(begin, end - begin),
                                    get_made_from(in)));
       begin = end + 1;
       if (begin >= instr.size())
============================================================
--- src/platform-wrapped.hh	56fdcf095ffef702042d9483aaeb62bc832369a1
+++ src/platform-wrapped.hh	a13edab3d54b35ab15b967b6e9903af0d850a6fb
@@ -23,7 +23,7 @@ inline path::status get_path_status(any_
 inline path::status get_path_status(any_path const & path)
 {
   std::string p(path.as_external());
-  return get_path_status(p.empty()?".":p);
+  return get_path_status(p.empty() ? "." : p);
 }
 
 inline void rename_clobberingly(any_path const & from, any_path const & to)
============================================================
--- src/mtn-sanity.cc	d6b11b878814b0a40a4dd0ae4e0f666566a1d8aa
+++ src/mtn-sanity.cc	1636e5bce6c325a2cf9103dbbb36d538a5b8aeb6
@@ -33,7 +33,7 @@ void
 }
 
 void
-mtn_sanity::inform_log(std::string const &msg)
+mtn_sanity::inform_log(std::string const & msg)
 {
   if (debug_p())
     {
@@ -42,19 +42,19 @@ void
 }
 
 void
-mtn_sanity::inform_message(std::string const &msg)
+mtn_sanity::inform_message(std::string const & msg)
 {
   ui.inform(msg);
 }
 
 void
-mtn_sanity::inform_warning(std::string const &msg)
+mtn_sanity::inform_warning(std::string const & msg)
 {
   ui.warn(msg);
 }
 
 void
-mtn_sanity::inform_error(std::string const &msg)
+mtn_sanity::inform_error(std::string const & msg)
 {
   ui.inform(msg);
 }
============================================================
--- src/mtn-sanity.hh	4703095d226cf4f77a1d5ac4c1142963f193bcad
+++ src/mtn-sanity.hh	5d51215a5bdea565cf83c2c759d2db503c581a76
@@ -19,10 +19,10 @@ private:
   void initialize(int, char **, char const *);
 
 private:
-  void inform_log(std::string const &msg);
-  void inform_message(std::string const &msg);
-  void inform_warning(std::string const &msg);
-  void inform_error(std::string const &msg);
+  void inform_log(std::string const & msg);
+  void inform_message(std::string const & msg);
+  void inform_warning(std::string const & msg);
+  void inform_error(std::string const & msg);
 };
 
 #endif
============================================================
--- src/luaext_globish.cc	470ed3d2b5443f902335346ae5d4afc4f0ef8ffa
+++ src/luaext_globish.cc	d13bd3416b058e26057aa94a0c01ade0b83d5e6f
@@ -16,18 +16,23 @@ LUAEXT(match, globish)
 
 LUAEXT(match, globish)
 {
-  const char *re = luaL_checkstring(LS, -2);
-  const char *str = luaL_checkstring(LS, -1);
+  const char * re = luaL_checkstring(LS, -2);
+  const char * str = luaL_checkstring(LS, -1);
 
   bool result = false;
-  try {
-    globish g(re, origin::user);
-    result = g.matches(str);
-  } catch (recoverable_failure & e) {
-    return luaL_error(LS, e.what());
-  } catch (...) {
-    return luaL_error(LS, "Unknown error.");
-  }
+  try
+    {
+      globish g(re, origin::user);
+      result = g.matches(str);
+    }
+  catch (recoverable_failure & e)
+    {
+      return luaL_error(LS, e.what());
+    }
+  catch (...)
+    {
+      return luaL_error(LS, "Unknown error.");
+    }
   lua_pushboolean(LS, result);
   return 1;
 }
============================================================
--- src/luaext_guess_binary.cc	2bc668e174bf62b58c47a9bbba758362d12639d9
+++ src/luaext_guess_binary.cc	e6c9a70da996bbe13be349b64e87e2dd94007724
@@ -20,7 +20,7 @@ LUAEXT(guess_binary_file_contents, )
 
 LUAEXT(guess_binary_file_contents, )
 {
-  const char *path = luaL_checkstring(LS, 1);
+  const char * path = luaL_checkstring(LS, 1);
 
   ifstream file(path, ios_base::binary);
   if (!file)
============================================================
--- src/luaext_mkstemp.cc	5170df290e82446908b2aee9704d1e905df0d03c
+++ src/luaext_mkstemp.cc	9b9dc591dd05d05bce1654ce1d51da048598d80e
@@ -19,7 +19,7 @@ LUAEXT(mkstemp, )
 
 LUAEXT(mkstemp, )
 {
-  char const *filename = luaL_checkstring (LS, 1);
+  char const * filename = luaL_checkstring (LS, 1);
   string dup(filename);
 
   if (!monotone_mkstemp(dup))
============================================================
--- src/luaext_parse_basic_io.cc	995b37d3cc3eb58a3c6be6db06e38f997ba6845e
+++ src/luaext_parse_basic_io.cc	088b7865260014a7d079b071ebad66bc769a4027
@@ -51,14 +51,15 @@ LUAEXT(parse_basic_io, )
       while (tt != basic_io::TOK_NONE);
     }
   catch (recoverable_failure & e)
-    {// there was a syntax error in our string
+    {
+      // there was a syntax error in our string
       lua_pushnil(LS);
       return 1;
     }
   lua_newtable(LS);
   int n = 1;
   for (vector<pair<string, vector<string> > >::const_iterator i = res.begin();
-        i != res.end(); ++i)
+       i != res.end(); ++i)
     {
       lua_newtable(LS);
       lua_pushstring(LS, i->first.c_str());
@@ -66,7 +67,7 @@ LUAEXT(parse_basic_io, )
       lua_newtable(LS);
       int m = 1;
       for (vector<string>::const_iterator j = i->second.begin();
-            j != i->second.end(); ++j)
+           j != i->second.end(); ++j)
         {
           lua_pushstring(LS, j->c_str());
           lua_rawseti(LS, -2, m++);
============================================================
--- src/luaext_platform.cc	288a571d7c91fbb4abffb66611b52aac74889aa2
+++ src/luaext_platform.cc	2c1c19ecf2b2745eecba2063cf23a83e09b672da
@@ -30,28 +30,28 @@ LUAEXT(existsonpath, )
 
 LUAEXT(existsonpath, )
 {
-  const char *exe = luaL_checkstring(LS, -1);
+  const char * exe = luaL_checkstring(LS, -1);
   lua_pushnumber(LS, existsonpath(exe));
   return 1;
 }
 
 LUAEXT(is_executable, )
 {
-  const char *path = luaL_checkstring(LS, -1);
+  const char * path = luaL_checkstring(LS, -1);
   lua_pushboolean(LS, is_executable(path));
   return 1;
 }
 
 LUAEXT(set_executable, )
 {
-  const char *path = luaL_checkstring(LS, -1);
+  const char * path = luaL_checkstring(LS, -1);
   lua_pushnumber(LS, set_executable(path));
   return 1;
 }
 
 LUAEXT(clear_executable, )
 {
-  const char *path = luaL_checkstring(LS, -1);
+  const char * path = luaL_checkstring(LS, -1);
   lua_pushnumber(LS, clear_executable(path));
   return 1;
 }
@@ -59,14 +59,14 @@ LUAEXT(spawn, )
 LUAEXT(spawn, )
 {
   int n = lua_gettop(LS);
-  const char *path = luaL_checkstring(LS, 1);
-  char **argv = (char**)malloc((n+1)*sizeof(char*));
+  const char * path = luaL_checkstring(LS, 1);
+  char ** argv = (char **)malloc((n + 1) * sizeof(char *));
   int i;
   pid_t ret;
-  if (argv==NULL)
+  if (argv == NULL)
     return 0;
-  argv[0] = (char*)path;
-  for (i=1; i<n; i++) argv[i] = (char*)luaL_checkstring(LS, i+1);
+  argv[0] = (char *)path;
+  for (i = 1; i < n; i++) argv[i] = (char *)luaL_checkstring(LS, i + 1);
   argv[i] = NULL;
   ret = process_spawn(argv);
   free(argv);
@@ -80,15 +80,15 @@ LUAEXT(spawn_redirected, )
   char const * infile = luaL_checkstring(LS, 1);
   char const * outfile = luaL_checkstring(LS, 2);
   char const * errfile = luaL_checkstring(LS, 3);
-  const char *path = luaL_checkstring(LS, 4);
+  const char * path = luaL_checkstring(LS, 4);
   n -= 3;
-  char **argv = (char**)malloc((n+1)*sizeof(char*));
+  char ** argv = (char **)malloc((n + 1) * sizeof(char *));
   int i;
   pid_t ret;
-  if (argv==NULL)
+  if (argv == NULL)
     return 0;
-  argv[0] = (char*)path;
-  for (i=1; i<n; i++) argv[i] = (char*)luaL_checkstring(LS,  i+4);
+  argv[0] = (char *)path;
+  for (i = 1; i < n; i++) argv[i] = (char *)luaL_checkstring(LS,  i + 4);
   argv[i] = NULL;
   ret = process_spawn_redirected(infile, outfile, errfile, argv);
   free(argv);
@@ -103,16 +103,18 @@ LUAEXT(spawn_redirected, )
 
 #define topfile(LS)     ((FILE **)luaL_checkudata(LS, 1, LUA_FILEHANDLE))
 
-static int io_fclose (lua_State *LS) {
-  FILE **p = topfile(LS);
+static int io_fclose (lua_State * LS)
+{
+  FILE ** p = topfile(LS);
   int ok = (fclose(*p) == 0);
   *p = NULL;
   lua_pushboolean(LS, ok);
   return 1;
 }
 
-static FILE **newfile (lua_State *LS) {
-  FILE **pf = (FILE **)lua_newuserdata(LS, sizeof(FILE *));
+static FILE ** newfile (lua_State * LS)
+{
+  FILE ** pf = (FILE **)lua_newuserdata(LS, sizeof(FILE *));
   *pf = NULL;  /* file handle is currently `closed' */
   luaL_getmetatable(LS, LUA_FILEHANDLE);
   lua_setmetatable(LS, -2);
@@ -126,20 +128,20 @@ LUAEXT(spawn_pipe, )
 LUAEXT(spawn_pipe, )
 {
   int n = lua_gettop(LS);
-  char **argv = (char**)malloc((n+1)*sizeof(char*));
+  char ** argv = (char **)malloc((n + 1) * sizeof(char *));
   int i;
   pid_t pid;
-  if (argv==NULL)
+  if (argv == NULL)
     return 0;
-  if (n<1)
+  if (n < 1)
     return 0;
-  for (i=0; i<n; i++) argv[i] = (char*)luaL_checkstring(LS,  i+1);
+  for (i = 0; i < n; i++) argv[i] = (char *)luaL_checkstring(LS,  i + 1);
   argv[i] = NULL;
 
   int infd;
-  FILE **inpf = newfile(LS);
+  FILE ** inpf = newfile(LS);
   int outfd;
-  FILE **outpf = newfile(LS);
+  FILE ** outpf = newfile(LS);
 
   pid = process_spawn_pipe(argv, inpf, outpf);
   free(argv);
@@ -165,7 +167,7 @@ LUAEXT(kill, )
   int n = lua_gettop(LS);
   pid_t pid = static_cast<pid_t>(luaL_checknumber(LS, -2));
   int sig;
-  if (n>1)
+  if (n > 1)
     sig = static_cast<int>(luaL_checknumber(LS, -1));
   else
     sig = SIGTERM;
@@ -251,7 +253,7 @@ namespace
     {
       lua_newtable(st);
     }
-    virtual void consume(const char *s)
+    virtual void consume(const char * s)
     {
       lua_pushstring(st, s);
       lua_rawseti(st, -2, n);
============================================================
--- src/roster_delta.cc	a8bba8fe7ce5ac1b18b996a6501727065304a526
+++ src/roster_delta.cc	6dc49f6012825875f1466f245c018869faee9052
@@ -36,17 +36,17 @@ namespace
   struct roster_delta_t
   {
     typedef std::set<node_id> nodes_deleted_t;
-    typedef std::map<pair<node_id, path_component>,
-                     node_id> dirs_added_t;
-    typedef std::map<pair<node_id, path_component>,
-                     pair<node_id, file_id> > files_added_t;
-    typedef std::map<node_id,
-                     pair<node_id, path_component> > nodes_renamed_t;
+    typedef std::map < pair<node_id, path_component>,
+            node_id > dirs_added_t;
+    typedef std::map < pair<node_id, path_component>,
+            pair<node_id, file_id> > files_added_t;
+    typedef std::map < node_id,
+            pair<node_id, path_component> > nodes_renamed_t;
     typedef std::map<node_id, file_id> deltas_applied_t;
     typedef std::set<pair<node_id, attr_key> > attrs_cleared_t;
-    typedef std::set<pair<node_id,
-                          pair<attr_key,
-                               pair<bool, attr_value> > > > attrs_changed_t;
+    typedef std::set < pair < node_id,
+            pair < attr_key,
+            pair<bool, attr_value> > > > attrs_changed_t;
     typedef std::map<node_id, const_marking_t> markings_changed_t;
 
     nodes_deleted_t nodes_deleted;
@@ -70,58 +70,58 @@ namespace
   {
     // Detach everything that should be detached.
     for (nodes_deleted_t::const_iterator
-           i = nodes_deleted.begin(); i != nodes_deleted.end(); ++i)
+         i = nodes_deleted.begin(); i != nodes_deleted.end(); ++i)
       roster.detach_node(*i);
     for (nodes_renamed_t::const_iterator
-           i = nodes_renamed.begin(); i != nodes_renamed.end(); ++i)
+         i = nodes_renamed.begin(); i != nodes_renamed.end(); ++i)
       roster.detach_node(i->first);
 
     // Delete the delete-able things.
     for (nodes_deleted_t::const_iterator
-           i = nodes_deleted.begin(); i != nodes_deleted.end(); ++i)
+         i = nodes_deleted.begin(); i != nodes_deleted.end(); ++i)
       roster.drop_detached_node(*i);
 
     // Add the new things.
     for (dirs_added_t::const_iterator
-           i = dirs_added.begin(); i != dirs_added.end(); ++i)
+         i = dirs_added.begin(); i != dirs_added.end(); ++i)
       roster.create_dir_node(i->second);
     for (files_added_t::const_iterator
-           i = files_added.begin(); i != files_added.end(); ++i)
+         i = files_added.begin(); i != files_added.end(); ++i)
       roster.create_file_node(i->second.second, i->second.first);
 
     // Attach everything.
     for (dirs_added_t::const_iterator
-           i = dirs_added.begin(); i != dirs_added.end(); ++i)
+         i = dirs_added.begin(); i != dirs_added.end(); ++i)
       roster.attach_node(i->second, i->first.first, i->first.second);
     for (files_added_t::const_iterator
-           i = files_added.begin(); i != files_added.end(); ++i)
+         i = files_added.begin(); i != files_added.end(); ++i)
       roster.attach_node(i->second.first, i->first.first, i->first.second);
     for (nodes_renamed_t::const_iterator
-           i = nodes_renamed.begin(); i != nodes_renamed.end(); ++i)
+         i = nodes_renamed.begin(); i != nodes_renamed.end(); ++i)
       roster.attach_node(i->first, i->second.first, i->second.second);
 
     // Okay, all the tricky tree-rearranging is done, just have to do some
     // individual node edits now.
     for (deltas_applied_t::const_iterator
-           i = deltas_applied.begin(); i != deltas_applied.end(); ++i)
+         i = deltas_applied.begin(); i != deltas_applied.end(); ++i)
       roster.set_content(i->first, i->second);
 
     for (attrs_cleared_t::const_iterator
-           i = attrs_cleared.begin(); i != attrs_cleared.end(); ++i)
+         i = attrs_cleared.begin(); i != attrs_cleared.end(); ++i)
       roster.erase_attr(i->first, i->second);
 
     for (attrs_changed_t::const_iterator
-           i = attrs_changed.begin(); i != attrs_changed.end(); ++i)
+         i = attrs_changed.begin(); i != attrs_changed.end(); ++i)
       roster.set_attr_unknown_to_dead_ok(i->first, i->second.first, i->second.second);
 
     // And finally, update the marking map.
     for (nodes_deleted_t::const_iterator
-           i = nodes_deleted.begin(); i != nodes_deleted.end(); ++i)
+         i = nodes_deleted.begin(); i != nodes_deleted.end(); ++i)
       {
         markings.remove_marking(*i);
       }
     for (markings_changed_t::const_iterator
-           i = markings_changed.begin(); i != markings_changed.end(); ++i)
+         i = markings_changed.begin(); i != markings_changed.end(); ++i)
       {
         markings.put_or_replace_marking(i->first, i->second);
       }
@@ -424,27 +424,27 @@ namespace
     string contents;
 
     for (roster_delta_t::nodes_deleted_t::const_iterator
-           i = d.nodes_deleted.begin(); i != d.nodes_deleted.end(); ++i)
+         i = d.nodes_deleted.begin(); i != d.nodes_deleted.end(); ++i)
       {
         push_nid(syms::deleted, *i, contents, 7);
         contents += "\n";
       }
     for (roster_delta_t::nodes_renamed_t::const_iterator
-           i = d.nodes_renamed.begin(); i != d.nodes_renamed.end(); ++i)
+         i = d.nodes_renamed.begin(); i != d.nodes_renamed.end(); ++i)
       {
         push_nid(syms::rename, i->first, contents, 8);
         push_loc(i->second, contents, 8);
         contents += "\n";
       }
     for (roster_delta_t::dirs_added_t::const_iterator
-           i = d.dirs_added.begin(); i != d.dirs_added.end(); ++i)
+         i = d.dirs_added.begin(); i != d.dirs_added.end(); ++i)
       {
         push_nid(syms::add_dir, i->second, contents, 8);
         push_loc(i->first, contents, 8);
         contents += "\n";
       }
     for (roster_delta_t::files_added_t::const_iterator
-           i = d.files_added.begin(); i != d.files_added.end(); ++i)
+         i = d.files_added.begin(); i != d.files_added.end(); ++i)
       {
         push_nid(syms::add_file, i->second.first, contents, 8);
         push_loc(i->first, contents, 8);
@@ -453,7 +453,7 @@ namespace
         contents.append("]\n\n");
       }
     for (roster_delta_t::deltas_applied_t::const_iterator
-           i = d.deltas_applied.begin(); i != d.deltas_applied.end(); ++i)
+         i = d.deltas_applied.begin(); i != d.deltas_applied.end(); ++i)
       {
         push_nid(syms::delta, i->first, contents, 7);
         contents.append("content [");
@@ -461,7 +461,7 @@ namespace
         contents.append("]\n\n");
       }
     for (roster_delta_t::attrs_cleared_t::const_iterator
-           i = d.attrs_cleared.begin(); i != d.attrs_cleared.end(); ++i)
+         i = d.attrs_cleared.begin(); i != d.attrs_cleared.end(); ++i)
       {
         push_nid(syms::attr_cleared, i->first, contents, 12);
         contents.append("        attr \"");
@@ -469,7 +469,7 @@ namespace
         contents.append("\"\n\n");
       }
     for (roster_delta_t::attrs_changed_t::const_iterator
-           i = d.attrs_changed.begin(); i != d.attrs_changed.end(); ++i)
+         i = d.attrs_changed.begin(); i != d.attrs_changed.end(); ++i)
       {
         push_nid(syms::attr_changed, i->first, contents, 12);
         contents.append("        attr \"");
@@ -481,7 +481,7 @@ namespace
         contents.append("\"\n\n");
       }
     for (roster_delta_t::markings_changed_t::const_iterator
-           i = d.markings_changed.begin(); i != d.markings_changed.end(); ++i)
+         i = d.markings_changed.begin(); i != d.markings_changed.end(); ++i)
       {
         bool is_file = !i->second->file_content.empty();
         int symbol_length = (is_file ? 12 : 9);
@@ -673,8 +673,8 @@ try_get_content_from_roster_delta(roster
 // -- in this case content is left undefined.
 bool
 try_get_content_from_roster_delta(roster_delta const & del,
-                              node_id const & nid,
-                              file_id & content)
+                                  node_id const & nid,
+                                  file_id & content)
 {
   roster_delta_t d;
   read_roster_delta(del, d);
============================================================
--- src/graph.cc	bdda595f7f9c3b6f1513389550f316bf3ebe688d
+++ src/graph.cc	73f589e3aeded5a90b01e9bcf4012bc311f66110
@@ -101,7 +101,7 @@ get_reconstruction_path(id const & start
               // Replicate the path if there's a fork.
               bool first = true;
               for (set<id>::const_iterator j = next.begin();
-                    j != next.end(); ++j)
+                   j != next.end(); ++j)
                 {
                   if (global_sanity.debug_p())
                     L(FL("considering %s -> %s") % tip % *j);
@@ -167,7 +167,7 @@ void toposort_rev_ancestry(rev_ancestry_
   // find the set of graph roots
   list<revision_id> roots;
   for (pi i = pcount.begin(); i != pcount.end(); ++i)
-    if(i->second==0)
+    if(i->second == 0)
       roots.push_back(i->first);
 
   while (!roots.empty())
@@ -200,16 +200,16 @@ advance_frontier(set<height_rev_pair> & 
   set<revision_id> parents;
   rg.get_parents(node, parents);
   for (set<revision_id>::const_iterator r = parents.begin();
-        r != parents.end(); r++)
-  {
-    if (seen.find(*r) == seen.end())
+       r != parents.end(); r++)
     {
-      rev_height h;
-      rg.get_height(*r, h);
-      frontier.insert(make_pair(h, *r));
-      seen.insert(*r);
+      if (seen.find(*r) == seen.end())
+        {
+          rev_height h;
+          rg.get_height(*r, h);
+          frontier.insert(make_pair(h, *r));
+          seen.insert(*r);
+        }
     }
-  }
 }
 
 void
@@ -242,54 +242,54 @@ get_uncommon_ancestors(revision_id const
   b_seen.insert(b);
 
   while (!a_frontier.empty() || !b_frontier.empty())
-  {
-    // We take the leaf-most (ie highest) height entry from any frontier.
-    // Note: the default height is the lowest possible.
-    rev_height a_height, b_height, common_height;
-    if (!a_frontier.empty())
-      a_height = a_frontier.rbegin()->first;
-    if (!b_frontier.empty())
-      b_height = b_frontier.rbegin()->first;
-    if (!common_frontier.empty())
-      common_height = common_frontier.rbegin()->first;
+    {
+      // We take the leaf-most (ie highest) height entry from any frontier.
+      // Note: the default height is the lowest possible.
+      rev_height a_height, b_height, common_height;
+      if (!a_frontier.empty())
+        a_height = a_frontier.rbegin()->first;
+      if (!b_frontier.empty())
+        b_height = b_frontier.rbegin()->first;
+      if (!common_frontier.empty())
+        common_height = common_frontier.rbegin()->first;
 
-    if (a_height > b_height && a_height > common_height)
-      {
-        a_uncommon_ancs.insert(a_frontier.rbegin()->second);
-        advance_frontier(a_frontier, a_seen, rg);
-      }
-    else if (b_height > a_height && b_height > common_height)
-      {
-        b_uncommon_ancs.insert(b_frontier.rbegin()->second);
-        advance_frontier(b_frontier, b_seen, rg);
-      }
-    else if (common_height > a_height && common_height > b_height)
-      {
-        advance_frontier(common_frontier, common_seen, rg);
-      }
-    else if (a_height == b_height) // may or may not also == common_height
-      {
-        // if both frontiers are the same, then we can safely say that
-        // we've found all uncommon ancestors. This stopping condition
-        // can result in traversing more nodes than required, but is simple.
-        if (a_frontier == b_frontier)
-          break;
+      if (a_height > b_height && a_height > common_height)
+        {
+          a_uncommon_ancs.insert(a_frontier.rbegin()->second);
+          advance_frontier(a_frontier, a_seen, rg);
+        }
+      else if (b_height > a_height && b_height > common_height)
+        {
+          b_uncommon_ancs.insert(b_frontier.rbegin()->second);
+          advance_frontier(b_frontier, b_seen, rg);
+        }
+      else if (common_height > a_height && common_height > b_height)
+        {
+          advance_frontier(common_frontier, common_seen, rg);
+        }
+      else if (a_height == b_height) // may or may not also == common_height
+        {
+          // if both frontiers are the same, then we can safely say that
+          // we've found all uncommon ancestors. This stopping condition
+          // can result in traversing more nodes than required, but is simple.
+          if (a_frontier == b_frontier)
+            break;
 
-        common_frontier.insert(*a_frontier.rbegin());
-        a_frontier.erase(*a_frontier.rbegin());
-        b_frontier.erase(*b_frontier.rbegin());
-      }
-    else if (a_height == common_height)
-      {
-        a_frontier.erase(*a_frontier.rbegin());
-      }
-    else if (b_height == common_height)
-      {
-        b_frontier.erase(*b_frontier.rbegin());
-      }
-    else
-      I(false);
-  }
+          common_frontier.insert(*a_frontier.rbegin());
+          a_frontier.erase(*a_frontier.rbegin());
+          b_frontier.erase(*b_frontier.rbegin());
+        }
+      else if (a_height == common_height)
+        {
+          a_frontier.erase(*a_frontier.rbegin());
+        }
+      else if (b_height == common_height)
+        {
+          b_frontier.erase(*b_frontier.rbegin());
+        }
+      else
+        I(false);
+    }
 }
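
The uncommon-ancestor walk rebraced above keeps each frontier as a
set<height_rev_pair> so that rbegin() is always the leaf-most entry, and uses
advance_frontier() to push a node's not-yet-seen parents onto the frontier.
A toy standalone analogue of that step (the sketch also pops the leaf-most
entry itself, plain maps stand in for the rev_graph interface, and every
revision is assumed to have a recorded height):

  #include <map>
  #include <set>
  #include <string>
  #include <utility>
  #include <vector>

  typedef std::pair<int, std::string> height_rev;   // (height, revision)

  void advance_frontier_sketch(std::set<height_rev> & frontier,
                               std::set<std::string> & seen,
                               std::map<std::string,
                                        std::vector<std::string> > const & parents_of,
                               std::map<std::string, int> const & height_of)
  {
    // Take the leaf-most (highest) entry off the frontier...
    height_rev node = *frontier.rbegin();
    frontier.erase(node);

    // ...and replace it by its not-yet-seen parents, keyed by their height.
    std::map<std::string, std::vector<std::string> >::const_iterator p =
      parents_of.find(node.second);
    if (p == parents_of.end())
      return;                                        // a root: nothing to add
    for (std::vector<std::string>::const_iterator r = p->second.begin();
         r != p->second.end(); ++r)
      if (seen.insert(*r).second)
        frontier.insert(std::make_pair(height_of.find(*r)->second, *r));
  }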
 
 
============================================================
--- src/graph.hh	ab4c40bcdcde42949481cbc0e3c29ba66ab07a9c
+++ src/graph.hh	2b65afd542c728777ac75d0de33ccae977be96d6
@@ -35,7 +35,7 @@ void toposort_rev_ancestry(rev_ancestry_
                         reconstruction_path & path);
 
 void toposort_rev_ancestry(rev_ancestry_map const & graph,
-                          std::vector<revision_id> & revisions);
+                           std::vector<revision_id> & revisions);
 
 struct rev_graph
 {
============================================================
--- src/lru_writeback_cache.hh	cfa4c94526bd19a2084d2f1086f1fe85364d0062
+++ src/lru_writeback_cache.hh	13d1849bab48c3a4c564d604bf4caa601e311efd
@@ -51,9 +51,9 @@ template <typename Key, typename Data> s
  *
  */
 // Manager is a concept with a writeout(Key, Data) method
-template <typename Key, typename Data,
-          typename Sizefn = WritebackCountfn<Data>,
-          typename Manager = NullManager<Key, Data> >
+template < typename Key, typename Data,
+         typename Sizefn = WritebackCountfn<Data>,
+         typename Manager = NullManager<Key, Data> >
 class LRUWritebackCache
 {
 public:
============================================================
--- src/sha1.cc	5e1aa972d7c7d66e06320b039989652b830dcd75
+++ src/sha1.cc	a3946859601e0531b7f4b74c9a262df794a87fc4
@@ -18,8 +18,8 @@
 // own timer and measures botan's different SHA1 providers, instead of
 // only measuring one.
 #if BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,7,23)
-  #include <botan/libstate.h>
-  #include <botan/benchmark.h>
+#include <botan/libstate.h>
+#include <botan/benchmark.h>
 #endif
 
 #include "sanity.hh"
@@ -40,7 +40,7 @@ CMD_HIDDEN(benchmark_sha1, "benchmark_sh
 #if BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,7,23)
 
   Botan::AutoSeeded_RNG rng;
-  Botan::Algorithm_Factory& af =
+  Botan::Algorithm_Factory & af =
     Botan::global_state().algorithm_factory();
 
   const int milliseconds = 5000;
============================================================
--- src/option.cc	7d0c3b6ca2eccfcdb1907e5498abdd74507a3612
+++ src/option.cc	407ac49827148579e4fbb74cb47cb20fa4b07523
@@ -18,323 +18,324 @@ using std::vector;
 using std::string;
 using std::vector;
 
-namespace option {
+namespace option
+{
 
-option_error::option_error(std::string const & str)
- : std::invalid_argument((F("option error: %s") % str).str())
-{}
+  option_error::option_error(std::string const & str)
+    : std::invalid_argument((F("option error: %s") % str).str())
+  {}
 
-unknown_option::unknown_option(std::string const & opt)
- : option_error((F("unknown option '%s'") % opt).str())
-{}
+  unknown_option::unknown_option(std::string const & opt)
+    : option_error((F("unknown option '%s'") % opt).str())
+  {}
 
-missing_arg::missing_arg(std::string const & opt)
- : option_error((F("missing argument to option '%s'") % opt).str())
-{}
+  missing_arg::missing_arg(std::string const & opt)
+    : option_error((F("missing argument to option '%s'") % opt).str())
+  {}
 
-extra_arg::extra_arg(std::string const & opt)
- : option_error((F("option '%s' does not take an argument") % opt).str())
-{}
+  extra_arg::extra_arg(std::string const & opt)
+    : option_error((F("option '%s' does not take an argument") % opt).str())
+  {}
 
-bad_arg::bad_arg(std::string const & opt, arg_type const & arg)
- : option_error((F("bad argument '%s' to option '%s'") % arg() % opt).str())
-{}
+  bad_arg::bad_arg(std::string const & opt, arg_type const & arg)
+    : option_error((F("bad argument '%s' to option '%s'") % arg() % opt).str())
+  {}
 
-bad_arg::bad_arg(std::string const & opt,
-                 arg_type const & arg,
-                 std::string const & reason)
- : option_error((F("bad argument '%s' to option '%s': %s")
-                   % arg() % opt % reason).str())
-{}
+  bad_arg::bad_arg(std::string const & opt,
+                   arg_type const & arg,
+                   std::string const & reason)
+    : option_error((F("bad argument '%s' to option '%s': %s")
+                    % arg() % opt % reason).str())
+  {}
 
-bad_arg_internal::bad_arg_internal(string const & str)
- : reason(str)
-{}
+  bad_arg_internal::bad_arg_internal(string const & str)
+    : reason(str)
+  {}
 
 
 
-void splitname(char const * f, string & name, string & n, string & cancel)
-{
-  string from(f);
-  if (from.find("/") != string::npos)
-    {
-      string::size_type slash = from.find("/");
-      cancel = from.substr(slash+1);
-      from.erase(slash);
-    }
-  // from looks like "foo" or "foo,f"
-  string::size_type comma = from.find(',');
-  name = from.substr(0, comma);
-  if (comma != string::npos)
-    n = from.substr(comma+1, 1);
-  else
-    n = "";
+  void splitname(char const * f, string & name, string & n, string & cancel)
+  {
+    string from(f);
+    if (from.find("/") != string::npos)
+      {
+        string::size_type slash = from.find("/");
+        cancel = from.substr(slash + 1);
+        from.erase(slash);
+      }
+    // from looks like "foo" or "foo,f"
+    string::size_type comma = from.find(',');
+    name = from.substr(0, comma);
+    if (comma != string::npos)
+      n = from.substr(comma + 1, 1);
+    else
+      n = "";
 
-  // "o" is equivalent to ",o"; it gives an option
-  // with only a short name
-  if (name.size() == 1)
-    {
-      I(n.empty());
-      n = name;
-      name = "";
-    }
-}
+    // "o" is equivalent to ",o"; it gives an option
+    // with only a short name
+    if (name.size() == 1)
+      {
+        I(n.empty());
+        n = name;
+        name = "";
+      }
+  }
 
 
-concrete_option::concrete_option()
-  : has_arg(false)
-{}
+  concrete_option::concrete_option()
+    : has_arg(false)
+  {}
 
-concrete_option::concrete_option(char const * names,
-                                 char const * desc,
-                                 bool arg,
-                                 boost::function<void (std::string)> set,
-                                 boost::function<void ()> reset,
-                                 bool hide,
-                                 char const * deprecate)
-{
-  description = desc;
-  splitname(names, longname, shortname, cancelname);
-  I((desc && desc[0]) || !longname.empty() || !shortname.empty());
-  // not sure how to display if it can only be reset (and what would that mean?)
-  I((!longname.empty() || !shortname.empty()) || cancelname.empty());
-  // If an option has a name (ie, can be set), it must have a setter function
-  I(set || (longname.empty() && shortname.empty()));
-  // If an option can be canceled, it must have a resetter function
-  I(reset || cancelname.empty());
-  has_arg = arg;
-  setter = set;
-  resetter = reset;
-  hidden = hide;
-  deprecated = deprecate;
-}
+  concrete_option::concrete_option(char const * names,
+                                   char const * desc,
+                                   bool arg,
+                                   boost::function<void (std::string)> set,
+                                   boost::function<void ()> reset,
+                                   bool hide,
+                                   char const * deprecate)
+  {
+    description = desc;
+    splitname(names, longname, shortname, cancelname);
+    I((desc && desc[0]) || !longname.empty() || !shortname.empty());
+    // not sure how to display if it can only be reset (and what would that mean?)
+    I((!longname.empty() || !shortname.empty()) || cancelname.empty());
+    // If an option has a name (ie, can be set), it must have a setter function
+    I(set || (longname.empty() && shortname.empty()));
+    // If an option can be canceled, it must have a resetter function
+    I(reset || cancelname.empty());
+    has_arg = arg;
+    setter = set;
+    resetter = reset;
+    hidden = hide;
+    deprecated = deprecate;
+  }
 
-bool concrete_option::operator<(concrete_option const & other) const
-{
-  if (longname != other.longname)
-    return longname < other.longname;
-  if (shortname != other.shortname)
-    return shortname < other.shortname;
-  if (cancelname != other.cancelname)
-    return cancelname < other.cancelname;
-  return description < other.description;
-}
+  bool concrete_option::operator<(concrete_option const & other) const
+  {
+    if (longname != other.longname)
+      return longname < other.longname;
+    if (shortname != other.shortname)
+      return shortname < other.shortname;
+    if (cancelname != other.cancelname)
+      return cancelname < other.cancelname;
+    return description < other.description;
+  }
 
-concrete_option_set
-operator | (concrete_option const & a, concrete_option const & b)
-{
-  return concrete_option_set(a) | b;
-}
+  concrete_option_set
+  operator | (concrete_option const & a, concrete_option const & b)
+  {
+    return concrete_option_set(a) | b;
+  }
 
-concrete_option_set::concrete_option_set()
-{}
+  concrete_option_set::concrete_option_set()
+  {}
 
-concrete_option_set::concrete_option_set(std::set<concrete_option> const & other)
-  : options(other)
-{}
+  concrete_option_set::concrete_option_set(std::set<concrete_option> const & other)
+    : options(other)
+  {}
 
-concrete_option_set::concrete_option_set(concrete_option const & opt)
-{
-  options.insert(opt);
-}
+  concrete_option_set::concrete_option_set(concrete_option const & opt)
+  {
+    options.insert(opt);
+  }
 
 // essentially the opposite of std::bind1st
-class discard_argument
-{
-  boost::function<void()> functor;
- public:
-  discard_argument(boost::function<void()> const & from)
-    : functor(from)
+  class discard_argument
+  {
+    boost::function<void()> functor;
+  public:
+    discard_argument(boost::function<void()> const & from)
+      : functor(from)
     {}
     void operator()(std::string const &)
     { return functor(); }
-};
+  };
 
-concrete_option_set &
-concrete_option_set::operator()(char const * names,
-                                char const * desc,
-                                boost::function<void ()> set,
-                                boost::function<void ()> reset,
-                                bool hide,
-                                char const * deprecate)
-{
-  options.insert(concrete_option(names, desc, false, discard_argument(set),
-                                 reset, hide, deprecate));
-  return *this;
-}
+  concrete_option_set &
+  concrete_option_set::operator()(char const * names,
+                                  char const * desc,
+                                  boost::function<void ()> set,
+                                  boost::function<void ()> reset,
+                                  bool hide,
+                                  char const * deprecate)
+  {
+    options.insert(concrete_option(names, desc, false, discard_argument(set),
+                                   reset, hide, deprecate));
+    return *this;
+  }
 
-concrete_option_set &
-concrete_option_set::operator()(char const * names,
-                                char const * desc,
-                                boost::function<void (string)> set,
-                                boost::function<void ()> reset,
-                                bool hide,
-                                char const * deprecate)
-{
-  options.insert(concrete_option(names, desc, true, set, reset, hide, deprecate));
-  return *this;
-}
+  concrete_option_set &
+  concrete_option_set::operator()(char const * names,
+                                  char const * desc,
+                                  boost::function<void (string)> set,
+                                  boost::function<void ()> reset,
+                                  bool hide,
+                                  char const * deprecate)
+  {
+    options.insert(concrete_option(names, desc, true, set, reset, hide, deprecate));
+    return *this;
+  }
 
-concrete_option_set
-concrete_option_set::operator | (concrete_option_set const & other) const
-{
-  concrete_option_set combined;
-  std::set_union(options.begin(), options.end(),
-                 other.options.begin(), other.options.end(),
-                 std::inserter(combined.options, combined.options.begin()));
-  return combined;
-}
+  concrete_option_set
+  concrete_option_set::operator | (concrete_option_set const & other) const
+  {
+    concrete_option_set combined;
+    std::set_union(options.begin(), options.end(),
+                   other.options.begin(), other.options.end(),
+                   std::inserter(combined.options, combined.options.begin()));
+    return combined;
+  }
 
-void concrete_option_set::reset() const
-{
-  for (std::set<concrete_option>::const_iterator i = options.begin();
-       i != options.end(); ++i)
-    {
-      if (i->resetter)
-        i->resetter();
-    }
-}
+  void concrete_option_set::reset() const
+  {
+    for (std::set<concrete_option>::const_iterator i = options.begin();
+         i != options.end(); ++i)
+      {
+        if (i->resetter)
+          i->resetter();
+      }
+  }
 
-static void
-tokenize_for_command_line(string const & from, args_vector & to)
-{
-  // Unfortunately, the tokenizer in basic_io is too format-specific
-  to.clear();
-  enum quote_type {none, one, two};
-  string cur;
-  quote_type type = none;
-  bool have_tok(false);
+  static void
+  tokenize_for_command_line(string const & from, args_vector & to)
+  {
+    // Unfortunately, the tokenizer in basic_io is too format-specific
+    to.clear();
+    enum quote_type {none, one, two};
+    string cur;
+    quote_type type = none;
+    bool have_tok(false);
 
-  for (string::const_iterator i = from.begin(); i != from.end(); ++i)
-    {
-      if (*i == '\'')
-        {
-          if (type == none)
-            type = one;
-          else if (type == one)
-            type = none;
-          else
-            {
-              cur += *i;
-              have_tok = true;
-            }
-        }
-      else if (*i == '"')
-        {
-          if (type == none)
-            type = two;
-          else if (type == two)
-            type = none;
-          else
-            {
-              cur += *i;
-              have_tok = true;
-            }
-        }
-      else if (*i == '\\')
-        {
-          if (type != one)
-            ++i;
-          E(i != from.end(), origin::user, F("invalid escape in '--xargs' file"));
-          cur += *i;
-          have_tok = true;
-        }
-      else if (string(" \n\t").find(*i) != string::npos)
-        {
-          if (type == none)
-            {
-              if (have_tok)
-                to.push_back(arg_type(cur, origin::user));
-              cur.clear();
-              have_tok = false;
-            }
-          else
-            {
-              cur += *i;
-              have_tok = true;
-            }
-        }
-      else
-        {
-          cur += *i;
-          have_tok = true;
-        }
-    }
-  if (have_tok)
-    to.push_back(arg_type(cur, origin::user));
-}
+    for (string::const_iterator i = from.begin(); i != from.end(); ++i)
+      {
+        if (*i == '\'')
+          {
+            if (type == none)
+              type = one;
+            else if (type == one)
+              type = none;
+            else
+              {
+                cur += *i;
+                have_tok = true;
+              }
+          }
+        else if (*i == '"')
+          {
+            if (type == none)
+              type = two;
+            else if (type == two)
+              type = none;
+            else
+              {
+                cur += *i;
+                have_tok = true;
+              }
+          }
+        else if (*i == '\\')
+          {
+            if (type != one)
+              ++i;
+            E(i != from.end(), origin::user, F("invalid escape in '--xargs' file"));
+            cur += *i;
+            have_tok = true;
+          }
+        else if (string(" \n\t").find(*i) != string::npos)
+          {
+            if (type == none)
+              {
+                if (have_tok)
+                  to.push_back(arg_type(cur, origin::user));
+                cur.clear();
+                have_tok = false;
+              }
+            else
+              {
+                cur += *i;
+                have_tok = true;
+              }
+          }
+        else
+          {
+            cur += *i;
+            have_tok = true;
+          }
+      }
+    if (have_tok)
+      to.push_back(arg_type(cur, origin::user));
+  }
 
-void concrete_option_set::from_command_line(int argc,
-                                            char const * const * argv)
-{
-  args_vector arguments;
-  for (int i = 1; i < argc; ++i)
-    arguments.push_back(arg_type(argv[i], origin::user));
-  from_command_line(arguments);
-}
+  void concrete_option_set::from_command_line(int argc,
+                                              char const * const * argv)
+  {
+    args_vector arguments;
+    for (int i = 1; i < argc; ++i)
+      arguments.push_back(arg_type(argv[i], origin::user));
+    from_command_line(arguments);
+  }
 
 // checks a multi-word option like 'no-builtin-rcfile' against a
 // possible abbreviated given option 'nbr' which is composed of
 // only the first character of each word
-static bool
-abbrev_match(string const & option, string const & part)
-{
-  if (option.find('-') == 0)
-    return false;
+  static bool
+  abbrev_match(string const & option, string const & part)
+  {
+    if (option.find('-') == 0)
+      return false;
 
-  string::const_iterator it = option.begin();
-  string opt_part(1, *it);
-  for (; it != option.end(); ++it)
-    {
-      if (*it == '-' && it != option.end())
-        opt_part += *(it+1);
-    }
+    string::const_iterator it = option.begin();
+    string opt_part(1, *it);
+    for (; it != option.end(); ++it)
+      {
+        if (*it == '-' && it != option.end())
+          opt_part += *(it + 1);
+      }
 
-  return part == opt_part;
-}
+    return part == opt_part;
+  }
 
-static concrete_option const &
-getopt(map<string, concrete_option> const & by_name, string & name)
-{
-  // try to match the option name as a whole first, so if the user
-  // specified "--foo" and we have "--foo" and "--foo-bar", don't
-  // display both choices
-  map<string, concrete_option>::const_iterator i = by_name.find(name);
-  if (i != by_name.end())
-    return i->second;
+  static concrete_option const &
+  getopt(map<string, concrete_option> const & by_name, string & name)
+  {
+    // try to match the option name as a whole first, so if the user
+    // specified "--foo" and we have "--foo" and "--foo-bar", don't
+    // display both choices
+    map<string, concrete_option>::const_iterator i = by_name.find(name);
+    if (i != by_name.end())
+      return i->second;
 
-  if (name.size() == 0)
-    throw unknown_option(name);
+    if (name.size() == 0)
+      throw unknown_option(name);
 
-  // try to find the option by partial name
-  set<string> candidates;
-  for (i = by_name.begin(); i != by_name.end(); ++i)
-    {
-      if (i->first.find(name) == 0)
-        candidates.insert(i->first);
-      if (abbrev_match(i->first, name))
-        candidates.insert(i->first);
-    }
+    // try to find the option by partial name
+    set<string> candidates;
+    for (i = by_name.begin(); i != by_name.end(); ++i)
+      {
+        if (i->first.find(name) == 0)
+          candidates.insert(i->first);
+        if (abbrev_match(i->first, name))
+          candidates.insert(i->first);
+      }
 
-  if (candidates.size() == 0)
-    throw unknown_option(name);
+    if (candidates.size() == 0)
+      throw unknown_option(name);
 
-  if (candidates.size() == 1)
-    {
-       string expanded_name = *candidates.begin();
-       i = by_name.find(expanded_name);
-       I(i != by_name.end());
-       L(FL("expanding option '%s' to '%s'") % name % expanded_name);
-       name = expanded_name;
-       return i->second;
-    }
+    if (candidates.size() == 1)
+      {
+        string expanded_name = *candidates.begin();
+        i = by_name.find(expanded_name);
+        I(i != by_name.end());
+        L(FL("expanding option '%s' to '%s'") % name % expanded_name);
+        name = expanded_name;
+        return i->second;
+      }
 
-  string err = (F("option '%s' has multiple ambiguous expansions:")
-                % name).str();
+    string err = (F("option '%s' has multiple ambiguous expansions:")
+                  % name).str();
 
-  for (set<string>::const_iterator j = candidates.begin();
-       j != candidates.end(); ++j)
-    {
+    for (set<string>::const_iterator j = candidates.begin();
+         j != candidates.end(); ++j)
+      {
         i = by_name.find(*j);
         I(i != by_name.end());
 
@@ -347,294 +348,294 @@ getopt(map<string, concrete_option> cons
           err += "\n'--" + *j + "' (" + (F("negation of '--%s'") % i->second.longname).str() + ")";
         else
           err += "\n'--" + *j + "' (" + i->second.description + ")";
-    }
+      }
 
-  E(false, origin::user, i18n_format(err));
-}
+    E(false, origin::user, i18n_format(err));
+  }
 
 // helper for get_by_name
 // Make sure that either:
 //   * There are no duplicate options, or
 //   * If we're only parsing options (and not applying them), any duplicates
 //     are consistent WRT whether they take an argument
-typedef pair<map<string, concrete_option>::iterator, bool> by_name_res_type;
-static void check_by_name_insertion(by_name_res_type const & res,
-                                    concrete_option const & opt,
-                                    concrete_option_set::preparse_flag pf)
-{
-  switch (pf)
-    {
-    case concrete_option_set::preparse:
-      if (!res.second)
-        {
-          string const & name = res.first->first;
-          concrete_option const & them = res.first->second;
-          bool const i_have_arg = (name != opt.cancelname && opt.has_arg);
-          bool const they_have_arg = (name != them.cancelname && them.has_arg);
-          I(i_have_arg == they_have_arg);
-        }
-      break;
-    case concrete_option_set::no_preparse:
-      I(res.second);
-      break;
-    }
-}
+  typedef pair<map<string, concrete_option>::iterator, bool> by_name_res_type;
+  static void check_by_name_insertion(by_name_res_type const & res,
+                                      concrete_option const & opt,
+                                      concrete_option_set::preparse_flag pf)
+  {
+    switch (pf)
+      {
+      case concrete_option_set::preparse:
+        if (!res.second)
+          {
+            string const & name = res.first->first;
+            concrete_option const & them = res.first->second;
+            bool const i_have_arg = (name != opt.cancelname && opt.has_arg);
+            bool const they_have_arg = (name != them.cancelname && them.has_arg);
+            I(i_have_arg == they_have_arg);
+          }
+        break;
+      case concrete_option_set::no_preparse:
+        I(res.second);
+        break;
+      }
+  }
 
 // generate an index that lets us look options up by name
-static map<string, concrete_option>
-get_by_name(std::set<concrete_option> const & options,
-            concrete_option_set::preparse_flag pf)
-{
-  map<string, concrete_option> by_name;
-  for (std::set<concrete_option>::const_iterator i = options.begin();
-       i != options.end(); ++i)
-    {
-      if (!i->longname.empty())
-        check_by_name_insertion(by_name.insert(make_pair(i->longname, *i)),
-                                *i, pf);
-      if (!i->shortname.empty())
-        check_by_name_insertion(by_name.insert(make_pair(i->shortname, *i)),
-                                *i, pf);
-      if (!i->cancelname.empty())
-        check_by_name_insertion(by_name.insert(make_pair(i->cancelname, *i)),
-                                *i, pf);
-    }
-  return by_name;
-}
+  static map<string, concrete_option>
+  get_by_name(std::set<concrete_option> const & options,
+              concrete_option_set::preparse_flag pf)
+  {
+    map<string, concrete_option> by_name;
+    for (std::set<concrete_option>::const_iterator i = options.begin();
+         i != options.end(); ++i)
+      {
+        if (!i->longname.empty())
+          check_by_name_insertion(by_name.insert(make_pair(i->longname, *i)),
+                                  *i, pf);
+        if (!i->shortname.empty())
+          check_by_name_insertion(by_name.insert(make_pair(i->shortname, *i)),
+                                  *i, pf);
+        if (!i->cancelname.empty())
+          check_by_name_insertion(by_name.insert(make_pair(i->cancelname, *i)),
+                                  *i, pf);
+      }
+    return by_name;
+  }
 
-void concrete_option_set::from_command_line(args_vector & args,
-                                            preparse_flag pf)
-{
-  map<string, concrete_option> by_name = get_by_name(options, pf);
+  void concrete_option_set::from_command_line(args_vector & args,
+                                              preparse_flag pf)
+  {
+    map<string, concrete_option> by_name = get_by_name(options, pf);
 
-  bool seen_dashdash = false;
-  for (args_vector::size_type i = 0; i < args.size(); ++i)
-    {
-      concrete_option o;
-      string name;
-      arg_type arg;
-      bool is_cancel;
-      bool separate_arg(false);
-      if (idx(args,i)() == "--" || seen_dashdash)
-        {
-          if (!seen_dashdash)
-            {
-              seen_dashdash = true;
-              continue;
-            }
-          name = "--";
-          o = getopt(by_name, name);
-          arg = idx(args,i);
-          is_cancel = false;
-        }
-      else if (idx(args,i)().substr(0,2) == "--")
-        {
-          string::size_type equals = idx(args,i)().find('=');
-          if (equals == string::npos)
-            name = idx(args,i)().substr(2);
-          else
-            name = idx(args,i)().substr(2, equals-2);
+    bool seen_dashdash = false;
+    for (args_vector::size_type i = 0; i < args.size(); ++i)
+      {
+        concrete_option o;
+        string name;
+        arg_type arg;
+        bool is_cancel;
+        bool separate_arg(false);
+        if (idx(args, i)() == "--" || seen_dashdash)
+          {
+            if (!seen_dashdash)
+              {
+                seen_dashdash = true;
+                continue;
+              }
+            name = "--";
+            o = getopt(by_name, name);
+            arg = idx(args, i);
+            is_cancel = false;
+          }
+        else if (idx(args, i)().substr(0, 2) == "--")
+          {
+            string::size_type equals = idx(args, i)().find('=');
+            if (equals == string::npos)
+              name = idx(args, i)().substr(2);
+            else
+              name = idx(args, i)().substr(2, equals - 2);
 
-          o = getopt(by_name, name);
-          is_cancel = (name == o.cancelname);
-          if ((!o.has_arg || is_cancel) && equals != string::npos)
+            o = getopt(by_name, name);
+            is_cancel = (name == o.cancelname);
+            if ((!o.has_arg || is_cancel) && equals != string::npos)
               throw extra_arg(name);
 
-          if (o.has_arg && !is_cancel)
-            {
-              if (equals == string::npos)
-                {
-                  separate_arg = true;
-                  if (i+1 == args.size())
-                    throw missing_arg(name);
-                  arg = idx(args,i+1);
-                }
-              else
-                arg = arg_type(idx(args,i)().substr(equals+1), origin::user);
-            }
-        }
-      else if (idx(args,i)().substr(0,1) == "-")
-        {
-          name = idx(args,i)().substr(1,1);
+            if (o.has_arg && !is_cancel)
+              {
+                if (equals == string::npos)
+                  {
+                    separate_arg = true;
+                    if (i + 1 == args.size())
+                      throw missing_arg(name);
+                    arg = idx(args, i + 1);
+                  }
+                else
+                  arg = arg_type(idx(args, i)().substr(equals + 1), origin::user);
+              }
+          }
+        else if (idx(args, i)().substr(0, 1) == "-")
+          {
+            name = idx(args, i)().substr(1, 1);
 
-          map<string, concrete_option>::const_iterator j = by_name.find(name);
-          if (j == by_name.end())
-            throw unknown_option(name);
-          o = j->second;
+            map<string, concrete_option>::const_iterator j = by_name.find(name);
+            if (j == by_name.end())
+              throw unknown_option(name);
+            o = j->second;
 
-          is_cancel = (name == o.cancelname);
-          I(!is_cancel);
-          if (!o.has_arg && idx(args,i)().size() != 2)
-            throw extra_arg(name);
+            is_cancel = (name == o.cancelname);
+            I(!is_cancel);
+            if (!o.has_arg && idx(args, i)().size() != 2)
+              throw extra_arg(name);
 
-          if (o.has_arg)
-            {
-              if (idx(args,i)().size() == 2)
-                {
-                  separate_arg = true;
-                  if (i+1 == args.size())
-                    throw missing_arg(name);
-                  arg = idx(args,i+1);
-                }
-              else
-                arg = arg_type(idx(args,i)().substr(2), origin::user);
-            }
-        }
-      else
-        {
-          name = "--";
-          o = getopt(by_name, name);
-          arg = idx(args,i);
-          is_cancel = false;
-        }
+            if (o.has_arg)
+              {
+                if (idx(args, i)().size() == 2)
+                  {
+                    separate_arg = true;
+                    if (i + 1 == args.size())
+                      throw missing_arg(name);
+                    arg = idx(args, i + 1);
+                  }
+                else
+                  arg = arg_type(idx(args, i)().substr(2), origin::user);
+              }
+          }
+        else
+          {
+            name = "--";
+            o = getopt(by_name, name);
+            arg = idx(args, i);
+            is_cancel = false;
+          }
 
-      if (name == "xargs" || name == "@")
-        {
-          // expand the --xargs in place
-          data dat;
-          read_data_for_command_line(arg, dat);
-          args_vector fargs;
-          tokenize_for_command_line(dat(), fargs);
+        if (name == "xargs" || name == "@")
+          {
+            // expand the --xargs in place
+            data dat;
+            read_data_for_command_line(arg, dat);
+            args_vector fargs;
+            tokenize_for_command_line(dat(), fargs);
 
-          args.erase(args.begin() + i);
-          if (separate_arg)
             args.erase(args.begin() + i);
-          args.insert(args.begin()+i, fargs.begin(), fargs.end());
-          --i;
-        }
-      else
-        {
-          if (separate_arg)
-            ++i;
-          try
-            {
-              if (o.deprecated)
-                W(F("deprecated option '%s' used: %s")
-                  % o.longname % gettext(o.deprecated));
-              if (!is_cancel)
-                {
-                  if (o.setter)
-                    o.setter(arg());
-                }
-              else
-                {
-                  if (o.resetter)
-                    o.resetter();
-                }
-            }
-          catch (boost::bad_lexical_cast)
-            {
-              throw bad_arg(o.longname, arg);
-            }
-          catch (bad_arg_internal & e)
-            {
-              if (e.reason == "")
+            if (separate_arg)
+              args.erase(args.begin() + i);
+            args.insert(args.begin() + i, fargs.begin(), fargs.end());
+            --i;
+          }
+        else
+          {
+            if (separate_arg)
+              ++i;
+            try
+              {
+                if (o.deprecated)
+                  W(F("deprecated option '%s' used: %s")
+                    % o.longname % gettext(o.deprecated));
+                if (!is_cancel)
+                  {
+                    if (o.setter)
+                      o.setter(arg());
+                  }
+                else
+                  {
+                    if (o.resetter)
+                      o.resetter();
+                  }
+              }
+            catch (boost::bad_lexical_cast)
+              {
                 throw bad_arg(o.longname, arg);
-              else
-                throw bad_arg(o.longname, arg, e.reason);
-            }
-        }
-    }
-}
+              }
+            catch (bad_arg_internal & e)
+              {
+                if (e.reason == "")
+                  throw bad_arg(o.longname, arg);
+                else
+                  throw bad_arg(o.longname, arg, e.reason);
+              }
+          }
+      }
+  }
 
-void concrete_option_set::from_key_value_pairs(vector<pair<string, string> > const & keyvals)
-{
-  map<string, concrete_option> by_name = get_by_name(options, no_preparse);
+  void concrete_option_set::from_key_value_pairs(vector<pair<string, string> > const & keyvals)
+  {
+    map<string, concrete_option> by_name = get_by_name(options, no_preparse);
 
-  for (vector<pair<string, string> >::const_iterator i = keyvals.begin();
-       i != keyvals.end(); ++i)
-    {
-      string key(i->first);
-      arg_type const & value(arg_type(i->second, origin::user));
+    for (vector<pair<string, string> >::const_iterator i = keyvals.begin();
+         i != keyvals.end(); ++i)
+      {
+        string key(i->first);
+        arg_type const & value(arg_type(i->second, origin::user));
 
-      concrete_option o = getopt(by_name, key);
-      bool const is_cancel = (key == o.cancelname);
+        concrete_option o = getopt(by_name, key);
+        bool const is_cancel = (key == o.cancelname);
 
-      try
-        {
-          if (o.deprecated)
-            W(F("deprecated option '%s' used: %s")
-              % o.longname % gettext(o.deprecated));
+        try
+          {
+            if (o.deprecated)
+              W(F("deprecated option '%s' used: %s")
+                % o.longname % gettext(o.deprecated));
 
-          if (!is_cancel)
-            {
-              if (o.setter)
-                o.setter(value());
-            }
-          else
-            {
-              if (o.resetter)
-                o.resetter();
-            }
-        }
-      catch (boost::bad_lexical_cast)
-        {
-          throw bad_arg(o.longname, value);
-        }
-      catch (bad_arg_internal & e)
-        {
-          if (e.reason == "")
+            if (!is_cancel)
+              {
+                if (o.setter)
+                  o.setter(value());
+              }
+            else
+              {
+                if (o.resetter)
+                  o.resetter();
+              }
+          }
+        catch (boost::bad_lexical_cast)
+          {
             throw bad_arg(o.longname, value);
-          else
-            throw bad_arg(o.longname, value, e.reason);
-        }
-    }
-}
+          }
+        catch (bad_arg_internal & e)
+          {
+            if (e.reason == "")
+              throw bad_arg(o.longname, value);
+            else
+              throw bad_arg(o.longname, value, e.reason);
+          }
+      }
+  }
 
 // Get the non-description part of the usage string,
 // looks like "--long [ -s ] <arg> / --cancel".
-static string usagestr(concrete_option const & opt)
-{
-  string out;
-  if (opt.longname == "--")
-    return "";
-  if (!opt.longname.empty() && !opt.shortname.empty())
-    out = "--" + opt.longname + " [ -" + opt.shortname + " ]";
-  else if (!opt.longname.empty())
-    out = "--" + opt.longname;
-  else if (!opt.shortname.empty())
-    out = "-" + opt.shortname;
+  static string usagestr(concrete_option const & opt)
+  {
+    string out;
+    if (opt.longname == "--")
+      return "";
+    if (!opt.longname.empty() && !opt.shortname.empty())
+      out = "--" + opt.longname + " [ -" + opt.shortname + " ]";
+    else if (!opt.longname.empty())
+      out = "--" + opt.longname;
+    else if (!opt.shortname.empty())
+      out = "-" + opt.shortname;
 
-  if (out.empty())
-    return out;
+    if (out.empty())
+      return out;
 
-  if (opt.has_arg)
-    out += " <arg>";
+    if (opt.has_arg)
+      out += " <arg>";
 
-  if (!opt.cancelname.empty())
-    {
-      if (!out.empty())
-        out += " / ";
-      out += "--" + opt.cancelname;
-    }
+    if (!opt.cancelname.empty())
+      {
+        if (!out.empty())
+          out += " / ";
+        out += "--" + opt.cancelname;
+      }
 
-  return out;
-}
+    return out;
+  }
 
-void
-concrete_option_set::get_usage_strings(vector<string> & names,
-                                       vector<string> & descriptions,
-                                       unsigned int & maxnamelen,
-                                       bool show_hidden) const
-{
-  unsigned int namelen = 0; // the longest option name string
-  names.clear();
-  descriptions.clear();
-  for (std::set<concrete_option>::const_iterator i = options.begin();
-       i != options.end(); ++i)
-    {
-      if (i->hidden && !show_hidden)
-        continue;
-      if (i->deprecated)
-        continue;
-      string name = usagestr(*i);
-      if (name.size() > namelen)
-        namelen = name.size();
-      names.push_back(name);
-      descriptions.push_back(gettext(i->description));
-    }
-  maxnamelen = namelen;
-}
+  void
+  concrete_option_set::get_usage_strings(vector<string> & names,
+                                         vector<string> & descriptions,
+                                         unsigned int & maxnamelen,
+                                         bool show_hidden) const
+  {
+    unsigned int namelen = 0; // the longest option name string
+    names.clear();
+    descriptions.clear();
+    for (std::set<concrete_option>::const_iterator i = options.begin();
+         i != options.end(); ++i)
+      {
+        if (i->hidden && !show_hidden)
+          continue;
+        if (i->deprecated)
+          continue;
+        string name = usagestr(*i);
+        if (name.size() > namelen)
+          namelen = name.size();
+        names.push_back(name);
+        descriptions.push_back(gettext(i->description));
+      }
+    maxnamelen = namelen;
+  }
 
 } // namespace option
 
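As an aside for readers following the reindented option.cc code above: the name specs passed to concrete_option (for example "message,m" or "no-builtin-rcfile/builtin-rcfile") are decomposed by splitname() into a long name, an optional one-letter short name, and an optional cancel name. The following standalone sketch mirrors that decomposition; split_spec() and its test values are illustrative only and are not part of monotone.

// Illustrative sketch only: mirrors the splitname() logic shown in the
// option.cc hunk above, without monotone's invariant checks (I(...)).
#include <iostream>
#include <string>

static void split_spec(std::string from,
                       std::string & name,
                       std::string & shortname,
                       std::string & cancel)
{
  name.clear(); shortname.clear(); cancel.clear();
  std::string::size_type slash = from.find('/');
  if (slash != std::string::npos)
    {
      cancel = from.substr(slash + 1);   // e.g. "builtin-rcfile"
      from.erase(slash);
    }
  // what is left looks like "foo" or "foo,f"
  std::string::size_type comma = from.find(',');
  name = from.substr(0, comma);
  if (comma != std::string::npos)
    shortname = from.substr(comma + 1, 1);
  // a bare "q" means an option with only the short name "-q"
  if (name.size() == 1)
    {
      shortname = name;
      name.clear();
    }
}

int main()
{
  std::string l, s, c;
  split_spec("message,m", l, s, c);
  std::cout << l << "|" << s << "|" << c << "\n";  // message|m|
  split_spec("no-builtin-rcfile/builtin-rcfile", l, s, c);
  std::cout << l << "|" << s << "|" << c << "\n";  // no-builtin-rcfile||builtin-rcfile
  split_spec("q", l, s, c);
  std::cout << l << "|" << s << "|" << c << "\n";  // |q|
  return 0;
}
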
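The tokenize_for_command_line() helper in the same hunk gives --xargs files shell-like splitting: single quotes, double quotes and backslash escapes keep whitespace inside one argument. Below is a hedged, self-contained approximation of that behaviour; it returns plain std::string tokens and throws on a trailing escape, whereas the real function fills an args_vector and reports the error with E(...).

// Rough standalone approximation of the --xargs tokenizer shown above;
// not monotone's code, but it follows the same quote/escape rules.
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

static std::vector<std::string> tokenize(std::string const & from)
{
  std::vector<std::string> out;
  enum { none, single, dbl } quote = none;
  std::string cur;
  bool have_tok = false;

  for (std::string::size_type i = 0; i < from.size(); ++i)
    {
      char c = from[i];
      if (c == '\'' && quote != dbl)            // toggle single quotes
        quote = (quote == none) ? single : none;
      else if (c == '"' && quote != single)     // toggle double quotes
        quote = (quote == none) ? dbl : none;
      else if (c == '\\' && quote != single)    // escape the next character
        {
          if (++i == from.size())
            throw std::runtime_error("invalid escape in '--xargs' file");
          cur += from[i];
          have_tok = true;
        }
      else if ((c == ' ' || c == '\n' || c == '\t') && quote == none)
        {
          if (have_tok)                         // unquoted whitespace ends a token
            out.push_back(cur);
          cur.clear();
          have_tok = false;
        }
      else
        {
          cur += c;
          have_tok = true;
        }
    }
  if (have_tok)
    out.push_back(cur);
  return out;
}

int main()
{
  std::vector<std::string> toks = tokenize("--message 'two words' a\\ b");
  for (std::vector<std::string>::const_iterator it = toks.begin();
       it != toks.end(); ++it)
    std::cout << "[" << *it << "]\n";  // [--message] [two words] [a b]
  return 0;
}
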
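Likewise, abbrev_match() above lets a multi-word option be abbreviated to the first letter of each dash-separated word, so 'nbr' matches 'no-builtin-rcfile'. A small sketch of that rule, again written as free-standing illustrative code rather than monotone's own:

// Hedged re-statement of the abbrev_match() rule: build the abbreviation
// from the first character of each '-'-separated word and compare.
#include <iostream>
#include <string>

static bool abbrev_match(std::string const & option, std::string const & part)
{
  if (option.empty() || option[0] == '-')
    return false;
  std::string abbrev(1, option[0]);
  for (std::string::size_type i = 0; i + 1 < option.size(); ++i)
    if (option[i] == '-')
      abbrev += option[i + 1];
  return part == abbrev;
}

int main()
{
  std::cout << abbrev_match("no-builtin-rcfile", "nbr") << "\n";  // 1
  std::cout << abbrev_match("no-builtin-rcfile", "nb") << "\n";   // 0
  std::cout << abbrev_match("quiet", "q") << "\n";                // 1
  return 0;
}
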
============================================================
--- src/option.hh	853a798c3150eef1824a73daca05d1e1cf1eb3f8
+++ src/option.hh	b671c64d0baad05da771208066080da34bb860e1
@@ -31,7 +31,8 @@
 #include "vocab.hh"
 
 // The types to represent the command line's parameters.
-class arg_type : public utf8 {
+class arg_type : public utf8
+{
 public:
   explicit arg_type(void) : utf8() {}
   arg_type(std::string const & s, origin::type f) : utf8(s, f) {}
@@ -41,7 +42,8 @@ typedef std::vector< arg_type > args_vec
 inline void dump(arg_type const & a, std::string & out) { out = a(); }
 typedef std::vector< arg_type > args_vector;
 
-namespace option {
+namespace option
+{
   // Base for errors thrown by this code.
   struct option_error : public std::invalid_argument
   {
@@ -226,10 +228,10 @@ namespace option {
   struct binder_only
   {
     T * obj;
-    boost::function<void(T*)> fun;
-    binder_only(boost::function<void(T*)> const & f, T * o)
+    boost::function<void(T *)> fun;
+    binder_only(boost::function<void(T *)> const & f, T * o)
       : obj(o), fun(f)
-      {}
+    {}
     void operator()()
     {
       fun(obj);
@@ -244,8 +246,8 @@ namespace option {
     char const * description;
     char const * names;
     bool has_arg;
-    boost::function<void (T*, std::string)> setter;
-    boost::function<void (T*)> resetter;
+    boost::function<void (T *, std::string)> setter;
+    boost::function<void (T *)> resetter;
     bool hidden;
     char const * deprecated;
 
@@ -299,7 +301,7 @@ namespace option {
   struct option_set
   {
     std::set<option<T> > options;
-    option_set(){}
+    option_set() {}
     option_set(option_set<T> const & other)
       : options(other.options)
     {}
============================================================
--- src/options_list.hh	0462e302b89179f4acb28ecb91f4255140d4a4a7
+++ src/options_list.hh	bb5041345a606db989cfb73fa66144da6a8ceb33
@@ -241,8 +241,8 @@ SIMPLE_OPTION(remote_stdio_host, "remote
                            "if you want to prevent use of older protocol versions"))
 
 SIMPLE_OPTION(remote_stdio_host, "remote-stdio-host", arg_type,
-    gettext_noop("sets the host (and optionally the port) for a "
-                 "remote netsync action"))
+              gettext_noop("sets the host (and optionally the port) for a "
+                           "remote netsync action"))
 
 SIMPLE_OPTION(branch, "branch,b", branch_name,
               gettext_noop("select branch cert for operation"))
@@ -265,7 +265,7 @@ GROUPED_SIMPLE_OPTION(globals, no_defaul
 #endif
 
 GROUPED_SIMPLE_OPTION(globals, no_default_confdir, "no-default-confdir/allow-default-confdir", bool,
-                     gettext_noop("forbid use of the default confdir"))
+                      gettext_noop("forbid use of the default confdir"))
 
 SIMPLE_OPTION(date, "date", date_t,
               gettext_noop("override date/time for commit"))
@@ -311,8 +311,8 @@ GROUPED_SIMPLE_OPTION(globals, roster_ca
 
 HIDE(roster_cache_performance_log)
 GROUPED_SIMPLE_OPTION(globals, roster_cache_performance_log, "roster-cache-performance-log",
-                     system_path,
-                     gettext_noop("log roster cache statistic to the given file"))
+                      system_path,
+                      gettext_noop("log roster cache statistic to the given file"))
 
 SIMPLE_OPTION(depth, "depth", restricted_long<0>,
               gettext_noop("limit the number of levels of directories to descend"))
@@ -323,25 +323,25 @@ GROUPED_SIMPLE_OPTION(diff_options, exte
 OPTSET_REL(diff_options, au_diff_options)
 
 GROUPED_SIMPLE_OPTION(diff_options, external_diff_args, "diff-args", std::string,
-        gettext_noop("argument to pass external diff hook"))
+                      gettext_noop("argument to pass external diff hook"))
 GROUPED_SIMPLE_OPTION(au_diff_options, reverse, "reverse", bool,
-        gettext_noop("reverse order of diff"))
+                      gettext_noop("reverse order of diff"))
 GROUPED_SIMPLE_OPTION(diff_options, no_show_encloser, "no-show-encloser/show-encloser", bool,
-     gettext_noop("do not show the function containing each block of changes"))
+                      gettext_noop("do not show the function containing each block of changes"))
 OPTSET_REL(au_diff_options, with_header)
 SIMPLE_OPTION(with_header, "with-header/without-header", bool,
               gettext_noop("show the matching cset in the diff header"))
 
 OPTVAR(diff_options, diff_type, diff_format, unified_diff)
 OPTION(diff_options, diff_context, false, "context",
-        gettext_noop("use context diff format"))
+       gettext_noop("use context diff format"))
 #ifdef option_bodies
 {
   diff_format = context_diff;
 }
 #endif
 OPTION(diff_options, diff_external, false, "external",
-        gettext_noop("use external diff hook for generating diffs"))
+       gettext_noop("use external diff hook for generating diffs"))
 #ifdef option_bodies
 {
   diff_format = external_diff;
@@ -368,30 +368,30 @@ GROUPED_SIMPLE_OPTION(globals, dump, "du
               gettext_noop("drop certs signed by keys we don't know about"))
 
 GROUPED_SIMPLE_OPTION(globals, dump, "dump", system_path,
-        gettext_noop("file to dump debugging log to, on failure"))
+                      gettext_noop("file to dump debugging log to, on failure"))
 
 SIMPLE_OPTION(exclude, "exclude", args_vector,
               gettext_noop("leave out anything described by its argument"))
 SIMPLE_OPTION(include, "include", args_vector,
-        gettext_noop("include anything described by its argument"))
+              gettext_noop("include anything described by its argument"))
 
 SIMPLE_OPTION(bookkeep_only, "bookkeep-only", bool,
-        gettext_noop("only update monotone's internal bookkeeping, not the filesystem"))
+              gettext_noop("only update monotone's internal bookkeeping, not the filesystem"))
 
 SIMPLE_OPTION(move_conflicting_paths,
               "move-conflicting-paths/no-move-conflicting-paths",
               bool,
               (F("move conflicting, unversioned paths into '%s' "
                  "before proceeding with any workspace change") %
-                 bookkeeping_resolutions_dir).str().c_str())
+               bookkeeping_resolutions_dir).str().c_str())
 
 OPTSET_REL(globals, ssh_sign)
 SIMPLE_INITIALIZED_OPTION(ssh_sign, "ssh-sign", enum_string, "yes,no,only,check",
-     gettext_noop("controls use of ssh-agent.  valid arguments are: "
-                  "'yes' to use ssh-agent to make signatures if possible, "
-                  "'no' to force use of monotone's internal code, "
-                  "'only' to force use of ssh-agent, "
-                  "'check' to sign with both and compare"))
+                          gettext_noop("controls use of ssh-agent.  valid arguments are: "
+                                       "'yes' to use ssh-agent to make signatures if possible, "
+                                       "'no' to force use of monotone's internal code, "
+                                       "'only' to force use of ssh-agent, "
+                                       "'check' to sign with both and compare"))
 
 SIMPLE_OPTION(force_duplicate_key, "force-duplicate-key", bool,
               gettext_noop("force genkey to not error out when the named key "
@@ -404,14 +404,14 @@ GROUPED_SIMPLE_OPTION(globals, ignore_su
               gettext_noop("show hidden commands and options"))
 
 GROUPED_SIMPLE_OPTION(globals, ignore_suspend_certs, "ignore-suspend-certs/no-ignore-suspend-certs", bool,
-                     gettext_noop("do not ignore revisions marked as suspended"))
+                      gettext_noop("do not ignore revisions marked as suspended"))
 
 GROUPED_SIMPLE_OPTION(globals, non_interactive, "non-interactive/interactive", bool,
-                     gettext_noop("do not prompt the user for input"))
+                      gettext_noop("do not prompt the user for input"))
 
 GROUPED_SIMPLE_OPTION(globals, key, "key,k/use-default-key", external_key_name,
-       gettext_noop("sets the key for signatures, using either the key "
-                    "name or the key hash"))
+                      gettext_noop("sets the key for signatures, using either the key "
+                                   "name or the key hash"))
 
 OPTSET_REL(globals, key_dir)
 SIMPLE_INITIALIZED_OPTION(key_dir, "keydir", system_path,
@@ -419,22 +419,22 @@ SIMPLE_OPTION(keys_to_push, "key-to-push
                           gettext_noop("set location of key store"))
 
 SIMPLE_OPTION(keys_to_push, "key-to-push", std::vector<external_key_name>,
-        gettext_noop("push the specified key even if it hasn't signed anything"))
+              gettext_noop("push the specified key even if it hasn't signed anything"))
 
 SIMPLE_OPTION(last, "last", restricted_long<1>,
               gettext_noop("limit log output to the last number of entries"))
 
 GROUPED_SIMPLE_OPTION(globals, log, "log", system_path,
-                     gettext_noop("file to write the log to"))
+                      gettext_noop("file to write the log to"))
 
 OPTSET(messages)
 GROUPED_SIMPLE_OPTION(messages, message, "message,m", std::vector<std::string>,
-        gettext_noop("set commit changelog message"))
+                      gettext_noop("set commit changelog message"))
 GROUPED_SIMPLE_OPTION(messages, msgfile, "message-file", utf8,
-        gettext_noop("set filename containing commit changelog message"))
+                      gettext_noop("set filename containing commit changelog message"))
 HIDE(no_prefix)
 GROUPED_SIMPLE_OPTION(messages, no_prefix, "no-prefix", bool,
-        gettext_noop("no prefix to message"))
+                      gettext_noop("no prefix to message"))
 
 SIMPLE_OPTION(missing, "missing", bool,
               gettext_noop("perform the operations for files missing from workspace"))
@@ -465,7 +465,7 @@ GROUPED_SIMPLE_OPTION(globals, nostd, "n
 #undef NORC_TEXT
 
 GROUPED_SIMPLE_OPTION(globals, nostd, "no-builtin-rcfile/builtin-rcfile", bool,
-                     gettext_noop("do not load the built-in lua file with the default hooks"))
+                      gettext_noop("do not load the built-in lua file with the default hooks"))
 
 DEPRECATE(old_norc, gettext_noop("please use '--no-standard-rcfiles' instead"), 1.0, 2.0)
 OPTION(globals, old_norc, false, "norc",
@@ -481,7 +481,7 @@ GROUPED_SIMPLE_OPTION(globals, extra_rcf
 #endif
 
 GROUPED_SIMPLE_OPTION(globals, extra_rcfiles, "rcfile/clear-rcfiles", args_vector,
-                     gettext_noop("load extra lua file"))
+                      gettext_noop("load extra lua file"))
 
 SIMPLE_OPTION(pidfile, "pid-file/no-pid-file", system_path,
               gettext_noop("record process id of server"))
@@ -491,7 +491,7 @@ OPTION(verbosity, quiet, false, "quiet,q
 OPTVAR(verbosity, int, verbosity, 0)
 
 OPTION(verbosity, quiet, false, "quiet,q",
-     gettext_noop("decrease verbosity (undo previous '-v', then disable informational output, then disable warnings)"))
+       gettext_noop("decrease verbosity (undo previous '-v', then disable informational output, then disable warnings)"))
 #ifdef option_bodies
 {
   --verbosity;
@@ -522,7 +522,7 @@ OPTION(verbosity, reallyquiet, false, "r
 
 DEPRECATE(reallyquiet, gettext_noop("please use '-q -q'"), 1.0, 2.0)
 OPTION(verbosity, reallyquiet, false, "reallyquiet",
-     gettext_noop("suppress warning, verbose, informational and progress messages"))
+       gettext_noop("suppress warning, verbose, informational and progress messages"))
 #ifdef option_bodies
 {
   verbosity = -2;
@@ -530,7 +530,7 @@ SIMPLE_OPTION(full, "full/concise", bool
 #endif
 
 SIMPLE_OPTION(full, "full/concise", bool,
-       gettext_noop("print detailed information"))
+              gettext_noop("print detailed information"))
 
 SIMPLE_OPTION(formatted, "formatted/plain", bool,
               gettext_noop("automatically run the output through nroff (default if the output is a terminal)"))
@@ -542,8 +542,8 @@ SIMPLE_OPTION(recursive, "recursive,R/no
 SIMPLE_OPTION(recursive, "recursive,R/no-recursive", bool,
               gettext_noop("also operate on the contents of any listed directories"))
 
-SIMPLE_OPTION(revision, "revision,r",args_vector,
-     gettext_noop("select revision id for operation"))
+SIMPLE_OPTION(revision, "revision,r", args_vector,
+              gettext_noop("select revision id for operation"))
 
 GROUPED_SIMPLE_OPTION(globals, root, "root", std::string,
                       gettext_noop("limit search for workspace to specified root"))
@@ -578,7 +578,7 @@ OPTSET_REL(automate_inventory_opts, no_u
 SIMPLE_OPTION(no_ignored, "no-ignored/ignored", bool,
               gettext_noop("don't output ignored files"))
 OPTSET_REL(automate_inventory_opts, no_unknown)
-SIMPLE_OPTION(no_unknown, "no-unknown/unknown",bool,
+SIMPLE_OPTION(no_unknown, "no-unknown/unknown", bool,
               gettext_noop("don't output unknown files"))
 OPTSET_REL(automate_inventory_opts, no_unchanged)
 SIMPLE_OPTION(no_unchanged, "no-unchanged/unchanged", bool,
@@ -608,7 +608,7 @@ SIMPLE_OPTION(resolve_conflicts, "resolv
 
 OPTSET_REL(resolve_conflicts_opts, resolve_conflicts)
 SIMPLE_OPTION(resolve_conflicts, "resolve-conflicts/no-resolve-conflicts", bool,
-       gettext_noop("specify conflict resolutions in a file, instead of interactively"))
+              gettext_noop("specify conflict resolutions in a file, instead of interactively"))
 
 OPTSET(conflicts_opts)
 OPTVAR(conflicts_opts, bookkeeping_path, conflicts_file, bookkeeping_conflicts_file)
============================================================
--- src/options.cc	aa1b83a97c8af2aaedfa061447d8a12a2be04e01
+++ src/options.cc	8938e79d5a5996bc1ec7fa353456cd79cde1deee
@@ -63,10 +63,10 @@ options::children()
   return val;
 }
 
-std::map<options::static_options_fun, std::list<void(options::*)()> > &
+std::map<options::static_options_fun, std::list<void(options:: *)()> > &
 options::var_membership()
 {
-  static map<static_options_fun, std::list<void(options::*)()> > val;
+  static map<static_options_fun, std::list<void(options:: *)()> > val;
   static bool first(true);
   if (first)
     {
@@ -210,8 +210,8 @@ void options::reset_optset(options::stat
 
 void options::reset_optset(options::static_options_fun opt)
 {
-  list<void(options::*)()> const & vars = var_membership()[opt];
-  for (list<void(options::*)()>::const_iterator i = vars.begin();
+  list<void(options:: *)()> const & vars = var_membership()[opt];
+  for (list<void(options:: *)()>::const_iterator i = vars.begin();
        i != vars.end(); ++i)
     {
       (this->*(*i))();
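
The reformatting in options.cc above touches pointer-to-member-function types such as void(options::*)(), which reset_optset() stores in a list and invokes through this->*. Since that syntax is easy to misread, here is a minimal, hypothetical example of the same pattern; the Opts struct and its members are invented for illustration and are not monotone's options class.

// Minimal illustration of the (this->*ptr)() pattern used by
// options::reset_optset() in the hunk above; the Opts class is made up.
#include <iostream>
#include <list>

struct Opts
{
  int verbosity;
  bool quiet;

  void reset_verbosity() { verbosity = 0; }
  void reset_quiet()     { quiet = false; }

  // Walk a list of member-function pointers and call each one on *this.
  void reset_all(std::list<void (Opts::*)()> const & resetters)
  {
    for (std::list<void (Opts::*)()>::const_iterator i = resetters.begin();
         i != resetters.end(); ++i)
      (this->*(*i))();
  }
};

int main()
{
  std::list<void (Opts::*)()> resetters;
  resetters.push_back(&Opts::reset_verbosity);
  resetters.push_back(&Opts::reset_quiet);

  Opts o; o.verbosity = 2; o.quiet = true;
  o.reset_all(resetters);
  std::cout << o.verbosity << " " << o.quiet << "\n";  // 0 0
  return 0;
}
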
============================================================
--- src/options.hh	a1ab6f9b8cd25fcb3338cc78b31d87d9e6df2026
+++ src/options.hh	68367508db1af92409cbd823946cb501d60111ee
@@ -121,7 +121,7 @@ struct options
   typedef options_type const & (*static_options_fun)();
 
   static std::map<static_options_fun, std::set<static_options_fun> > &children();
-  static std::map<static_options_fun, std::list<void(options::*)()> > &var_membership();
+  static std::map<static_options_fun, std::list<void(options:: *)()> > &var_membership();
   static std::map<static_options_fun, bool> &hidden();
   static std::map<static_options_fun, char const *> &deprecated();
 
============================================================
--- src/pcrewrap.cc	7f5bd4e7d8d9e02f2b0271fa9a0f64dcbce2467d
+++ src/pcrewrap.cc	18585099e76610c71edbf2f012546c1556b0edd0
@@ -79,7 +79,7 @@ namespace pcre
     if (!basedat)
       pcre_compile_error(errcode, err, erroff, pattern, made_from);
 
-    pcre_extra *ed = pcre_study(basedat, 0, &err);
+    pcre_extra * ed = pcre_study(basedat, 0, &err);
     if (err)
       pcre_study_error(err, pattern, made_from);
     if (!ed)
@@ -155,8 +155,9 @@ namespace pcre
 
     // "int ovector[worksize]" is C99 only (not valid C++, but allowed by gcc/clang)
     // boost::shared_array is I think not planned to be part of C++0x
-    class xyzzy {
-      int *data;
+    class xyzzy
+    {
+      int * data;
     public:
       xyzzy(int len) : data(new int[len]) {}
       ~xyzzy() { delete[] data; }
@@ -176,7 +177,7 @@ namespace pcre
     else if (rc < 0)
       pcre_exec_error(rc, made_from, subject_origin); // throws
 
-    for (int i=0; i < cap_count; ++i)
+    for (int i = 0; i < cap_count; ++i)
       {
         string match;
         // not an empty match
@@ -215,7 +216,7 @@ pcre_compile_error(int errcode, char con
     case 50: // [code allegedly not in use]
     case 52: // internal error: overran compiling workspace
     case 53: // internal error: previously-checked referenced subpattern
-             // not found
+      // not found
       throw oops((F("while compiling regex '%s': %s") % pattern % err)
                  .str().c_str());
 
@@ -229,7 +230,7 @@ pcre_compile_error(int errcode, char con
                               % pattern % err)
                            : (F("error near char %d of regex '%s': %s")
                               % (erroff + 1) % pattern % err)
-                           ));
+                          ));
     }
 }
 
============================================================
--- src/pcrewrap.hh	99182ce16ab33a2188f00c5f355e8e1e9d0d52e5
+++ src/pcrewrap.hh	0e242e97778053474920e79475906679e2cadb3c
@@ -24,30 +24,30 @@ namespace pcre
 namespace pcre
 {
   enum flags
-    {
-      // flags usable with both pcre_compile and pcre_exec
-      DEFAULT         = 0x0000,                    // no special behavior
-      NEWLINE_CR      = 0x0001,                    // newline is \r
-      NEWLINE_LF      = 0x0002,                    // newline is \n
-      NEWLINE_CRLF    = (NEWLINE_CR|NEWLINE_LF),   // newline is \r\n
-      ANCHORED        = 0x0004,                    // match only at beginning
-                                                   // of string (\A in pat)
-      // flags usable only with pcre_exec
-      NOTBOL          = 0x0008, // beginning of string isn't beginning of line
-      NOTEOL          = 0x0010, // end of string isn't end of line
-      NOTEMPTY        = 0x0020, // an empty match is a match failure
+  {
+    // flags usable with both pcre_compile and pcre_exec
+    DEFAULT         = 0x0000,                    // no special behavior
+    NEWLINE_CR      = 0x0001,                    // newline is \r
+    NEWLINE_LF      = 0x0002,                    // newline is \n
+    NEWLINE_CRLF    = (NEWLINE_CR | NEWLINE_LF), // newline is \r\n
+    ANCHORED        = 0x0004,                    // match only at beginning
+    // of string (\A in pat)
+    // flags usable only with pcre_exec
+    NOTBOL          = 0x0008, // beginning of string isn't beginning of line
+    NOTEOL          = 0x0010, // end of string isn't end of line
+    NOTEMPTY        = 0x0020, // an empty match is a match failure
 
-      // flags usable only with pcre_compile
-      CASELESS        = 0x0040, // case insensitive match (?i)
-      DOLLAR_ENDONLY  = 0x0080, // only in !MULTILINE mode, $ equiv to \Z
-      DOTALL          = 0x0100, // dot matches newline (?s)
-      DUPNAMES        = 0x0200, // permit duplicate names for named captures
-      EXTENDED        = 0x0400, // whitespace permitted in syntax (?x)
-      FIRSTLINE       = 0x0800, // match must begin before first newline
-      MULTILINE       = 0x1000, // ^ and $ match at internal newlines (?m)
-      UNGREEDY        = 0x4000, // quantifiers aren't greedy unless
-                                // followed with ? (opposite of default)
-    };
+    // flags usable only with pcre_compile
+    CASELESS        = 0x0040, // case insensitive match (?i)
+    DOLLAR_ENDONLY  = 0x0080, // only in !MULTILINE mode, $ equiv to \Z
+    DOTALL          = 0x0100, // dot matches newline (?s)
+    DUPNAMES        = 0x0200, // permit duplicate names for named captures
+    EXTENDED        = 0x0400, // whitespace permitted in syntax (?x)
+    FIRSTLINE       = 0x0800, // match must begin before first newline
+    MULTILINE       = 0x1000, // ^ and $ match at internal newlines (?m)
+    UNGREEDY        = 0x4000, // quantifiers aren't greedy unless
+    // followed with ? (opposite of default)
+  };
 
   // A regex object is the compiled form of a PCRE regular expression.
   struct regex
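
For the pcrewrap.hh hunk just above: the flags enum is an ordinary bitmask, so values are combined with | and tested with &, and NEWLINE_CRLF is defined as exactly NEWLINE_CR | NEWLINE_LF. The snippet below only exercises that arithmetic with a copied subset of the enum; it does not touch the regex class, whose interface is not shown in this diff.

// Standalone demonstration of combining the pcre::flags bit values shown
// above; the enum values are copied from the diff, nothing else is assumed.
#include <cassert>

namespace pcre
{
  enum flags
  {
    DEFAULT      = 0x0000,
    NEWLINE_CR   = 0x0001,
    NEWLINE_LF   = 0x0002,
    NEWLINE_CRLF = (NEWLINE_CR | NEWLINE_LF),
    ANCHORED     = 0x0004,
    CASELESS     = 0x0040,
    MULTILINE    = 0x1000
  };
}

int main()
{
  unsigned f = pcre::CASELESS | pcre::MULTILINE;  // request (?i) and (?m)
  assert(f & pcre::CASELESS);
  assert(!(f & pcre::ANCHORED));
  assert(pcre::NEWLINE_CRLF == (pcre::NEWLINE_CR | pcre::NEWLINE_LF));
  return 0;
}
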
============================================================
--- src/project.cc	f3ba624e5361749e52f409da2fb6d6cb3455647d
+++ src/project.cc	c97e54e434e9fda218a5d35a338899d8cbf7292a
@@ -51,8 +51,8 @@ operator<<(std::ostream & os,
 operator<<(std::ostream & os,
            key_identity_info const & identity)
 {
-  os<<"{id="<<identity.id<<"; given_name="<<identity.given_name
-    <<"; official_name="<<identity.official_name<<"}";
+  os << "{id=" << identity.id << "; given_name=" << identity.given_name
+     << "; official_name=" << identity.official_name << "}";
   return os;
 }
 
@@ -165,9 +165,9 @@ project_t::get_branch_heads(branch_name 
                             multimap<revision_id, revision_id> * inverse_graph_cache_ptr) const
 {
   pair<branch_name, suspended_indicator>
-    cache_index(name, ignore_suspend_certs);
+  cache_index(name, ignore_suspend_certs);
   pair<outdated_indicator, set<revision_id> > &
-    branch = branch_heads[cache_index];
+  branch = branch_heads[cache_index];
   outdated_indicator & indicator = branch.first;
   set<revision_id> & my_heads = branch.second;
   if (indicator.outdated())
@@ -196,7 +196,8 @@ project_t::get_branch_heads(branch_name 
           my_heads = leaves;
         }
       else
-        { // bah, do it the slow way
+        {
+          // bah, do it the slow way
           indicator = db.get_revisions_with_cert(cert_name(branch_cert_name),
                                                  typecast_vocab<cert_value>(name),
                                                  my_heads);
@@ -707,14 +708,15 @@ notify_if_multiple_heads(project_t & pro
 {
   set<revision_id> heads;
   project.get_branch_heads(branchname, heads, ignore_suspend_certs);
-  if (heads.size() > 1) {
-    string prefixedline;
-    prefix_lines_with(_("note: "),
-                      _("branch '%s' has multiple heads\n"
-                        "perhaps consider '%s merge'"),
-                      prefixedline);
-    P(i18n_format(prefixedline) % branchname % prog_name);
-  }
+  if (heads.size() > 1)
+    {
+      string prefixedline;
+      prefix_lines_with(_("note: "),
+                        _("branch '%s' has multiple heads\n"
+                          "perhaps consider '%s merge'"),
+                        prefixedline);
+      P(i18n_format(prefixedline) % branchname % prog_name);
+    }
 }
 
 // Guess which branch is appropriate for a commit below IDENT.
============================================================
--- src/project.hh	a156012f665b7608020320598c08541ee340cd3c
+++ src/project.hh	18ee94d486bf013f7342320201f9790a8cd26f14
@@ -83,9 +83,9 @@ private:
   // These are caches of what's in the database. They are updated when
   // they're noticed to be out of date, which will always be during a
   // logically read-only operation.
-  mutable std::map<std::pair<branch_name, suspended_indicator>,
-                   std::pair<outdated_indicator, std::set<revision_id> >
-                   > branch_heads;
+  mutable std::map < std::pair<branch_name, suspended_indicator>,
+          std::pair<outdated_indicator, std::set<revision_id> >
+          > branch_heads;
   mutable std::set<branch_name> branches;
   mutable outdated_indicator indicator;
 
============================================================
--- src/asciik.cc	cf946f9a14ad309615704bc960255c50e12b636a
+++ src/asciik.cc	f5f8ac069640616c8e4ae07054ee8de46f38df52
@@ -282,7 +282,7 @@ asciik::try_draw(vector<revision_id> con
           if (found != next_row.end())
             {
               size_t j = distance(next_row.begin(), found);
-              size_t d = i>j ? i-j : j-i;
+              size_t d = i > j ? i - j : j - i;
               if (d > 1)
                 return false;
               if (d != 0)
@@ -299,9 +299,9 @@ asciik::try_draw(vector<revision_id> con
       {
         size_t i = curr_loc;
         size_t j = distance(next_row.begin(),
-          find(next_row.begin(), next_row.end(), *p));
+                            find(next_row.begin(), next_row.end(), *p));
         I(j < next_items);
-        size_t d = i>j ? i-j : j-i;
+        size_t d = i > j ? i - j : j - i;
         if ((d > 1) && have_shift)
           return false;
         parent_links.insert(pair<size_t, size_t>(i, j));
@@ -319,7 +319,7 @@ asciik::try_draw(vector<revision_id> con
 
   set<pair<size_t, size_t> > links(preservation_links);
   copy(parent_links.begin(), parent_links.end(),
-    insert_iterator<set<pair<size_t, size_t> > >(links, links.begin()));
+       insert_iterator<set<pair<size_t, size_t> > >(links, links.begin()));
   draw(curr_items, next_items, curr_loc, links, curr_ghosts, annotation);
   return true;
 }
@@ -332,7 +332,7 @@ asciik::print(revision_id const & rev,
   if (find(curr_row.begin(), curr_row.end(), rev) == curr_row.end())
     curr_row.push_back(rev);
   size_t curr_loc = distance(curr_row.begin(),
-    find(curr_row.begin(), curr_row.end(), rev));
+                             find(curr_row.begin(), curr_row.end(), rev));
   // it must be found as either it was there already or we just added it
   I(curr_loc < curr_row.size());
 
@@ -352,7 +352,7 @@ asciik::print(revision_id const & rev,
   // ghost handling has been done.
   vector<revision_id> no_ghost(next_row);
   vector<revision_id>::iterator first_ghost = find(no_ghost.begin(),
-    no_ghost.end(), ghost);
+                                                   no_ghost.end(), ghost);
   if (first_ghost != no_ghost.end())
     no_ghost.erase(first_ghost);
 
============================================================
--- src/ssh_agent.cc	6d188e7012a0b82f782563b09bbd7adf3f70cf75
+++ src/ssh_agent.cc	6db021ac057c655aaae66371b4af8c2e5f28dc66
@@ -124,14 +124,14 @@ get_long(char const * buf)
 get_long(char const * buf)
 {
   L((FL("ssh_agent: get_long: %u %u %u %u")
-     % widen<u32,char>(buf[0])
-     % widen<u32,char>(buf[1])
-     % widen<u32,char>(buf[2])
-     % widen<u32,char>(buf[3])));
-  return ((widen<u32,char>(buf[0]) << 24)
-          | (widen<u32,char>(buf[1]) << 16)
-          | (widen<u32,char>(buf[2]) << 8)
-          | widen<u32,char>(buf[3]));
+     % widen<u32, char>(buf[0])
+     % widen<u32, char>(buf[1])
+     % widen<u32, char>(buf[2])
+     % widen<u32, char>(buf[3])));
+  return ((widen<u32, char>(buf[0]) << 24)
+          | (widen<u32, char>(buf[1]) << 16)
+          | (widen<u32, char>(buf[2]) << 8)
+          | widen<u32, char>(buf[3]));
 }
 
 static u32
@@ -399,7 +399,7 @@ ssh_agent::has_key(const keypair & key)
 
   vector<RSA_PublicKey> ssh_keys = get_keys();
   for (vector<RSA_PublicKey>::const_iterator
-         si = ssh_keys.begin(); si != ssh_keys.end(); ++si)
+       si = ssh_keys.begin(); si != ssh_keys.end(); ++si)
     {
       if ((*pub_key).get_e() == (*si).get_e()
           && (*pub_key).get_n() == (*si).get_n())
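
get_long() above decodes a big-endian 32-bit length field from the ssh-agent wire format. A minimal standalone equivalent, using plain unsigned char in place of monotone's widen<u32, char>() helper:

#include <cstdint>
#include <cstdio>

// read four bytes, most significant first, into a 32-bit value
static std::uint32_t get_long(unsigned char const * buf)
{
  return (std::uint32_t(buf[0]) << 24)
         | (std::uint32_t(buf[1]) << 16)
         | (std::uint32_t(buf[2]) << 8)
         | std::uint32_t(buf[3]);
}

int main()
{
  unsigned char buf[4] = {0x00, 0x00, 0x01, 0x02};
  std::printf("%u\n", (unsigned)get_long(buf));  // prints 258
  return 0;
}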
============================================================
--- src/vocab_cast.hh	9710931398c21cebf7b008e4982e5cfc2f2b6614
+++ src/vocab_cast.hh	8050d9584166063f05430c115a1bff0cdaa0147d
@@ -23,8 +23,8 @@ void typecast_vocab_container(From const
 void typecast_vocab_container(From const & from, To & to)
 {
   std::transform(from.begin(), from.end(), std::inserter(to, to.end()),
-                 &typecast_vocab<typename To::value_type,
-                 typename From::value_type>);
+                 &typecast_vocab < typename To::value_type,
+                 typename From::value_type > );
 }
 
 // You won't use this directly either.
@@ -40,8 +40,8 @@ void add_decoration_to_container(From co
 void add_decoration_to_container(From const & from, To & to)
 {
   std::transform(from.begin(), from.end(), std::inserter(to, to.end()),
-                 &add_decoration<typename To::value_type,
-                 typename From::value_type>);
+                 &add_decoration < typename To::value_type,
+                 typename From::value_type > );
 }
 
 template<typename From, typename To>
============================================================
--- src/dates.cc	02648f8882988ad5122eccc39528249b6333b219
+++ src/dates.cc	6027cc482fe80ee75f83f00f26e916b87414c405
@@ -60,7 +60,8 @@ using std::tm;
 using std::tm;
 
 // Our own "struct tm"-like struct to represent broken-down times
-struct broken_down_time {
+struct broken_down_time
+{
   int millisec;    /* milliseconds (0 - 999) */
   int sec;         /* seconds (0 - 59) */
   int min;         /* minutes (0 - 59) */
@@ -103,14 +104,15 @@ u32 const SEC  = 1;
 
 // These constants are all in seconds.
 u32 const SEC  = 1;
-u32 const MIN  = 60*SEC;
-u32 const HOUR = 60*MIN;
-u64 const DAY  = 24*HOUR;
-u64 const YEAR = 365*DAY;
+u32 const MIN  = 60 * SEC;
+u32 const HOUR = 60 * MIN;
+u64 const DAY  = 24 * HOUR;
+u64 const YEAR = 365 * DAY;
 
 inline s64 MILLISEC(s64 n) { return n * 1000; }
 
-unsigned char const DAYS_PER_MONTH[] = {
+unsigned char const DAYS_PER_MONTH[] =
+{
   31, // jan
   28, // feb (non-leap)
   31, // mar
@@ -129,7 +131,7 @@ is_leap_year(s32 year)
 is_leap_year(s32 year)
 {
   return (year % 4 == 0
-    && (year % 100 != 0 || year % 400 == 0));
+          && (year % 100 != 0 || year % 400 == 0));
 }
 inline s32
 days_in_year(s32 year)
@@ -175,12 +177,12 @@ our_gmtime(s64 ts, broken_down_time & tb
   // This is the result of inverting the equation
   //    yb = y*365 + y/4 - y/100 + y/400
   // it approximates years since the epoch for any day count.
-  u32 year = (400*days / 146097);
+  u32 year = (400 * days / 146097);
 
   // Compute the _exact_ number of days from the epoch to the beginning of
   // the approximate year determined above.
   u64 yearbeg;
-  yearbeg = widen<u64,u32>(year)*365 + year/4 - year/100 + year/400;
+  yearbeg = widen<u64, u32>(year) * 365 + year / 4 - year / 100 + year / 400;
 
   // Our epoch is year 1, not year 0 (there is no year 0).
   year++;
@@ -347,8 +349,8 @@ date_t::as_iso_8601_extended() const
   I(valid());
   our_gmtime(d, tb);
   return (FL("%04u-%02u-%02uT%02u:%02u:%02u")
-             % tb.year % tb.month % tb.day
-             % tb.hour % tb.min % tb.sec).str();
+          % tb.year % tb.month % tb.day
+          % tb.hour % tb.min % tb.sec).str();
 }
 
 ostream &
@@ -375,7 +377,7 @@ date_t::as_formatted_localtime(string co
   // within range for the current time_t type so that localtime doesn't
   // produce a bad result.
 
-  s64 seconds = d/1000 - get_epoch_offset();
+  s64 seconds = d / 1000 - get_epoch_offset();
 
   L(FL("%s seconds UTC since unix epoch") % seconds);
 
@@ -529,9 +531,9 @@ date_t::date_t(string const & s)
       // seconds
       u8 sec;
       E(s.at(i) >= '0' && s.at(i) <= '9'
-        && s.at(i-1) >= '0' && s.at(i-1) <= '5', origin::user,
+        && s.at(i - 1) >= '0' && s.at(i - 1) <= '5', origin::user,
         F("unrecognized date (monotone only understands ISO 8601 format)"));
-      sec = (s.at(i-1) - '0')*10 + (s.at(i) - '0');
+      sec = (s.at(i - 1) - '0') * 10 + (s.at(i) - '0');
       i -= 2;
       E(sec <= 60, origin::user,
         F("seconds out of range"));
@@ -543,9 +545,9 @@ date_t::date_t(string const & s)
       // minutes
       u8 min;
       E(s.at(i) >= '0' && s.at(i) <= '9'
-        && s.at(i-1) >= '0' && s.at(i-1) <= '5', origin::user,
+        && s.at(i - 1) >= '0' && s.at(i - 1) <= '5', origin::user,
         F("unrecognized date (monotone only understands ISO 8601 format)"));
-      min = (s.at(i-1) - '0')*10 + (s.at(i) - '0');
+      min = (s.at(i - 1) - '0') * 10 + (s.at(i) - '0');
       i -= 2;
       E(min < 60, origin::user,
         F("minutes out of range"));
@@ -556,11 +558,11 @@ date_t::date_t(string const & s)
 
       // hours
       u8 hour;
-      E((s.at(i-1) >= '0' && s.at(i-1) <= '1'
+      E((s.at(i - 1) >= '0' && s.at(i - 1) <= '1'
          && s.at(i) >= '0' && s.at(i) <= '9')
-        || (s.at(i-1) == '2' && s.at(i) >= '0' && s.at(i) <= '3'), origin::user,
+        || (s.at(i - 1) == '2' && s.at(i) >= '0' && s.at(i) <= '3'), origin::user,
         F("unrecognized date (monotone only understands ISO 8601 format)"));
-      hour = (s.at(i-1) - '0')*10 + (s.at(i) - '0');
+      hour = (s.at(i - 1) - '0') * 10 + (s.at(i) - '0');
       i -= 2;
       E(hour < 24, origin::user,
         F("hour out of range"));
@@ -572,10 +574,10 @@ date_t::date_t(string const & s)
 
       // day
       u8 day;
-      E(s.at(i-1) >= '0' && s.at(i-1) <= '3'
+      E(s.at(i - 1) >= '0' && s.at(i - 1) <= '3'
         && s.at(i) >= '0' && s.at(i) <= '9', origin::user,
         F("unrecognized date (monotone only understands ISO 8601 format)"));
-      day = (s.at(i-1) - '0')*10 + (s.at(i) - '0');
+      day = (s.at(i - 1) - '0') * 10 + (s.at(i) - '0');
       i -= 2;
 
       // optional dash
@@ -584,10 +586,10 @@ date_t::date_t(string const & s)
 
       // month
       u8 month;
-      E(s.at(i-1) >= '0' && s.at(i-1) <= '1'
+      E(s.at(i - 1) >= '0' && s.at(i - 1) <= '1'
         && s.at(i) >= '0' && s.at(i) <= '9', origin::user,
         F("unrecognized date (monotone only understands ISO 8601 format)"));
-      month = (s.at(i-1) - '0')*10 + (s.at(i) - '0');
+      month = (s.at(i - 1) - '0') * 10 + (s.at(i) - '0');
       E(month >= 1 && month <= 12, origin::user,
         F("month out of range in '%s'") % s);
       i -= 2;
@@ -608,7 +610,7 @@ date_t::date_t(string const & s)
         {
           E(s.at(i) >= '0' && s.at(i) <= '9', origin::user,
             F("unrecognized date (monotone only understands ISO 8601 format)"));
-          year += (s.at(i) - '0')*digit;
+          year += (s.at(i) - '0') * digit;
           i--;
           digit *= 10;
         }
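
The year approximation in our_gmtime() above works because one 400-year Gregorian cycle contains exactly 146097 days, which is where year = 400 * days / 146097 comes from. A small sketch that checks the constant using the is_leap_year() rule shown in this file:

#include <cstdio>

// same rule as dates.cc: divisible by 4, except centuries not divisible by 400
static bool is_leap_year(int year)
{
  return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
}

int main()
{
  long days = 0;
  for (int y = 1; y <= 400; ++y)
    days += is_leap_year(y) ? 366 : 365;
  std::printf("days in one 400-year cycle: %ld\n", days);  // prints 146097
  return 0;
}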
============================================================
--- src/dates.hh	3a730e5536cece2437e9caad0e4f8f2767e4887d
+++ src/dates.hh	009891adf5e29e47deb4c8021a05ca1391f279c2
@@ -26,7 +26,7 @@ struct date_t
 
   // initialize from broken-down time
   date_t(int year, int month, int day,
-         int hour=0, int min=0, int sec=0, int millisec=0);
+         int hour = 0, int min = 0, int sec = 0, int millisec = 0);
 
   // initialize from a string; presently recognizes only
   // ISO 8601 "basic" and "extended" time formats.
@@ -53,17 +53,17 @@ struct date_t
 
   // Date comparison operators
   bool operator <(date_t const & other) const
-    { return d < other.d; };
+  { return d < other.d; };
   bool operator <=(date_t const & other) const
-    { return d <= other.d; };
+  { return d <= other.d; };
   bool operator >(date_t const & other) const
-    { return d > other.d; };
+  { return d > other.d; };
   bool operator >=(date_t const & other) const
-    { return d >= other.d; };
+  { return d >= other.d; };
   bool operator ==(date_t const & other) const
-    { return d == other.d; };
+  { return d == other.d; };
   bool operator !=(date_t const & other) const
-    { return d != other.d; };
+  { return d != other.d; };
 
   // Addition and subtraction of millisecond amounts
   date_t & operator +=(s64 const other);
============================================================
--- src/specialized_lexical_cast.cc	21b197949d6fea0426a7e22a61baf4c2fceff758
+++ src/specialized_lexical_cast.cc	316e542247b7872612fa32ad692d37b9783e569d
@@ -27,10 +27,10 @@ std::string boost::lexical_cast<std::str
   while (i && pos <= maxlen)
     {
       --pos;
-      buf[pos] = ('0' + (i%10));
+      buf[pos] = ('0' + (i % 10));
       i /= 10;
     }
-  return std::string(buf+pos);
+  return std::string(buf + pos);
 }
 
 template<>
@@ -40,7 +40,7 @@ unsigned int boost::lexical_cast<unsigne
   std::string::const_iterator i;
   for (i = s.begin(); i != s.end() && (unsigned int)(*i - '0') < 10; ++i)
     {
-      out = out*10 + (*i - '0');
+      out = out * 10 + (*i - '0');
     }
   if (i != s.end())
     throw boost::bad_lexical_cast();
============================================================
--- src/lexical_cast.hh	fe70361a7da25ed93b4b46b266220fcfa29bf5c3
+++ src/lexical_cast.hh	6d5d88588259fedae57308d38fa84bfdc384ebb4
@@ -20,7 +20,8 @@
 // the generic template for the types we specialize here).  This is not
 // a theoretical problem; the Windows linker will fail.
 
-namespace boost {
+namespace boost
+{
   template<>
   std::string lexical_cast<std::string, unsigned int>(unsigned int const & _i);
 
============================================================
--- src/current_exception.hh	4cdea3209b8fbd10ba6ea4b71db8930dda3b4d08
+++ src/current_exception.hh	f2849b15fe31d4e665b15f72c3a5922a382e478e
@@ -17,29 +17,29 @@
 // around on MSDN, MSVC type_info.name() is already demangled, and there is
 // no documented equivalent of __cxa_current_exception_type().
 #ifdef HAVE_CXXABI_H
- #include <cxxabi.h>
- #ifdef HAVE___CXA_DEMANGLE
-  inline char const * demangle_typename(char const * name)
-  {
-    int status = -1;
-    char * dem = abi::__cxa_demangle(name, 0, 0, &status);
-    if (status == 0)
-      return dem;
-    else
-      return 0;
-  }
- #else
-  #define demangle_typename(x) 0
- #endif
- #ifdef HAVE___CXA_CURRENT_EXCEPTION_TYPE
-  #define get_current_exception_type() abi::__cxa_current_exception_type()
- #else
-  #define get_current_exception_type() 0
- #endif
+#include <cxxabi.h>
+#ifdef HAVE___CXA_DEMANGLE
+inline char const * demangle_typename(char const * name)
+{
+  int status = -1;
+  char * dem = abi::__cxa_demangle(name, 0, 0, &status);
+  if (status == 0)
+    return dem;
+  else
+    return 0;
+}
 #else
- #define demangle_typename(x) 0
- #define get_current_exception_type() 0
+#define demangle_typename(x) 0
 #endif
+#ifdef HAVE___CXA_CURRENT_EXCEPTION_TYPE
+#define get_current_exception_type() abi::__cxa_current_exception_type()
+#else
+#define get_current_exception_type() 0
+#endif
+#else
+#define demangle_typename(x) 0
+#define get_current_exception_type() 0
+#endif
 
 #endif
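
A minimal sketch of how these helpers are used on a GNU toolchain: inside a catch-all handler, abi::__cxa_current_exception_type() identifies the in-flight exception and abi::__cxa_demangle() turns its mangled name into something readable. The example calls the ABI functions directly rather than through the macros above:

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>
#include <stdexcept>
#include <typeinfo>

int main()
{
  try
    {
      throw std::runtime_error("boom");
    }
  catch (...)
    {
      std::type_info const * t = abi::__cxa_current_exception_type();
      int status = -1;
      char * dem = abi::__cxa_demangle(t->name(), 0, 0, &status);
      // on success the demangled name is heap-allocated and must be free()d
      std::printf("caught: %s\n", status == 0 ? dem : t->name());
      std::free(dem);
    }
  return 0;
}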
 
============================================================
--- src/base.hh	a8d19ce4d4aab1fcfbc2f868fd09779c3c0e3ffa
+++ src/base.hh	9b269776dd26976070e0eefb6d2b9af3fa0fa546
@@ -47,7 +47,8 @@ void dump(T const &, std::string &)
   // also we get better diagnostics this way (the error tells you what is
   // wrong, not just that there's an assertion failure).
   enum dummy { d = (sizeof(struct dump_must_be_specialized_for_this_type)
-                    == sizeof(T)) };
+                    == sizeof(T))
+             };
 }
 
 template <> void dump(std::string const & obj, std::string & out);
============================================================
--- src/gzip.cc	e7c19bee910e3d421fd073933810ec52092b1d9b
+++ src/gzip.cc	8cab5f5f7d09c2a9a31c350523de06cc262ae163
@@ -27,395 +27,397 @@
 #include <map>
 #include <zlib.h>
 
-namespace Botan {
+namespace Botan
+{
 
-namespace {
+  namespace
+  {
 
-/*************************************************
-* Allocation Information for Zlib                *
-*************************************************/
-class Zlib_Alloc_Info
-   {
-   public:
-      std::map<void*, u32bit> current_allocs;
-      Allocator* alloc;
+    /*************************************************
+    * Allocation Information for Zlib                *
+    *************************************************/
+    class Zlib_Alloc_Info
+    {
+    public:
+      std::map<void *, u32bit> current_allocs;
+      Allocator * alloc;
 
       Zlib_Alloc_Info() { alloc = Allocator::get(false); }
-   };
+    };
 
-/*************************************************
-* Allocation Function for Zlib                   *
-*************************************************/
-void* zlib_malloc(void* info_ptr, unsigned int n, unsigned int size)
-   {
-   Zlib_Alloc_Info* info = static_cast<Zlib_Alloc_Info*>(info_ptr);
-   void* ptr = info->alloc->allocate(n * size);
-   info->current_allocs[ptr] = n * size;
-   return ptr;
-   }
+    /*************************************************
+    * Allocation Function for Zlib                   *
+    *************************************************/
+    void * zlib_malloc(void * info_ptr, unsigned int n, unsigned int size)
+    {
+      Zlib_Alloc_Info * info = static_cast<Zlib_Alloc_Info *>(info_ptr);
+      void * ptr = info->alloc->allocate(n * size);
+      info->current_allocs[ptr] = n * size;
+      return ptr;
+    }
 
-/*************************************************
-* Allocation Function for Zlib                   *
-*************************************************/
-void zlib_free(void* info_ptr, void* ptr)
-   {
-   Zlib_Alloc_Info* info = static_cast<Zlib_Alloc_Info*>(info_ptr);
-   std::map<void*, u32bit>::const_iterator i = info->current_allocs.find(ptr);
-   if(i == info->current_allocs.end())
-      throw Invalid_Argument("zlib_free: Got pointer not allocated by us");
-   info->alloc->deallocate(ptr, i->second);
-   }
-}
+    /*************************************************
+    * Allocation Function for Zlib                   *
+    *************************************************/
+    void zlib_free(void * info_ptr, void * ptr)
+    {
+      Zlib_Alloc_Info * info = static_cast<Zlib_Alloc_Info *>(info_ptr);
+      std::map<void *, u32bit>::const_iterator i = info->current_allocs.find(ptr);
+      if(i == info->current_allocs.end())
+        throw Invalid_Argument("zlib_free: Got pointer not allocated by us");
+      info->alloc->deallocate(ptr, i->second);
+    }
+  }
 
-/*************************************************
-* Wrapper Type for Zlib z_stream                 *
-*************************************************/
-class Zlib_Stream
-   {
-   public:
-      z_stream stream;
+  /*************************************************
+  * Wrapper Type for Zlib z_stream                 *
+  *************************************************/
+  class Zlib_Stream
+  {
+  public:
+    z_stream stream;
 
-      Zlib_Stream()
-         {
-         std::memset(&stream, 0, sizeof(z_stream));
-         stream.zalloc = zlib_malloc;
-         stream.zfree = zlib_free;
-         stream.opaque = new Zlib_Alloc_Info;
-         }
-      ~Zlib_Stream()
-         {
-         Zlib_Alloc_Info* info = static_cast<Zlib_Alloc_Info*>(stream.opaque);
-         delete info;
-         std::memset(&stream, 0, sizeof(z_stream));
-         }
-   };
+    Zlib_Stream()
+    {
+      std::memset(&stream, 0, sizeof(z_stream));
+      stream.zalloc = zlib_malloc;
+      stream.zfree = zlib_free;
+      stream.opaque = new Zlib_Alloc_Info;
+    }
+    ~Zlib_Stream()
+    {
+      Zlib_Alloc_Info * info = static_cast<Zlib_Alloc_Info *>(stream.opaque);
+      delete info;
+      std::memset(&stream, 0, sizeof(z_stream));
+    }
+  };
 
-/*************************************************
-* Gzip_Compression Constructor                   *
-*************************************************/
-Gzip_Compression::Gzip_Compression(u32bit l) :
-   level((l >= 9) ? 9 : l), buffer(DEFAULT_BUFFERSIZE),
-   pipe(new Hash_Filter("CRC32")), count( 0 )
-   {
+  /*************************************************
+  * Gzip_Compression Constructor                   *
+  *************************************************/
+  Gzip_Compression::Gzip_Compression(u32bit l) :
+    level((l >= 9) ? 9 : l), buffer(DEFAULT_BUFFERSIZE),
+    pipe(new Hash_Filter("CRC32")), count( 0 )
+  {
 
-   zlib = new Zlib_Stream;
-   // window_bits == -15 relies on an undocumented feature of zlib, which
-   // suppresses the zlib header on the message. We need that since gzip doesn't
-   // use this header.  The feature has been confirmed to exist in 1.1.4, which
-   // everyone should be using due to security fixes. In later versions this
-   // feature is documented, along with the ability to do proper gzip output
-   // (that would be a nicer way to do things, but will have to wait until 1.2
-   // becomes more widespread).
-   // The other settings are the defaults that deflateInit() gives
-   if(deflateInit2(&(zlib->stream), level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY) != Z_OK)
+    zlib = new Zlib_Stream;
+    // window_bits == -15 relies on an undocumented feature of zlib, which
+    // suppresses the zlib header on the message. We need that since gzip doesn't
+    // use this header.  The feature has been confirmed to exist in 1.1.4, which
+    // everyone should be using due to security fixes. In later versions this
+    // feature is documented, along with the ability to do proper gzip output
+    // (that would be a nicer way to do things, but will have to wait until 1.2
+    // becomes more widespread).
+    // The other settings are the defaults that deflateInit() gives
+    if(deflateInit2(&(zlib->stream), level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY) != Z_OK)
       {
-      delete zlib; zlib = 0;
-      throw Exception("Gzip_Compression: Memory allocation error");
+        delete zlib; zlib = 0;
+        throw Exception("Gzip_Compression: Memory allocation error");
       }
-   }
+  }
 
-/*************************************************
-* Gzip_Compression Destructor                    *
-*************************************************/
-Gzip_Compression::~Gzip_Compression()
-   {
-   deflateEnd(&(zlib->stream));
-   delete zlib; zlib = 0;
-   }
+  /*************************************************
+  * Gzip_Compression Destructor                    *
+  *************************************************/
+  Gzip_Compression::~Gzip_Compression()
+  {
+    deflateEnd(&(zlib->stream));
+    delete zlib; zlib = 0;
+  }
 
-/*************************************************
-* Start Compressing with Gzip                    *
-*************************************************/
-void Gzip_Compression::start_msg()
-   {
-   clear();
-   put_header();
-   pipe.start_msg();
-   count = 0;
-   }
+  /*************************************************
+  * Start Compressing with Gzip                    *
+  *************************************************/
+  void Gzip_Compression::start_msg()
+  {
+    clear();
+    put_header();
+    pipe.start_msg();
+    count = 0;
+  }
 
-/*************************************************
-* Compress Input with Gzip                       *
-*************************************************/
-void Gzip_Compression::write(const byte input[], u32bit length)
-   {
+  /*************************************************
+  * Compress Input with Gzip                       *
+  *************************************************/
+  void Gzip_Compression::write(const byte input[], u32bit length)
+  {
 
-   count += length;
-   pipe.write(input, length);
+    count += length;
+    pipe.write(input, length);
 
-   zlib->stream.next_in = (Bytef*)input;
-   zlib->stream.avail_in = length;
+    zlib->stream.next_in = (Bytef *)input;
+    zlib->stream.avail_in = length;
 
-   while(zlib->stream.avail_in != 0)
+    while(zlib->stream.avail_in != 0)
       {
-      zlib->stream.next_out = (Bytef*)buffer.begin();
-      zlib->stream.avail_out = buffer.size();
-      int rc = deflate(&(zlib->stream), Z_NO_FLUSH);
-      if (rc != Z_OK && rc != Z_STREAM_END)
-         throw Exception("Internal error in Gzip_Compression deflate.");
-      send(buffer.begin(), buffer.size() - zlib->stream.avail_out);
+        zlib->stream.next_out = (Bytef *)buffer.begin();
+        zlib->stream.avail_out = buffer.size();
+        int rc = deflate(&(zlib->stream), Z_NO_FLUSH);
+        if (rc != Z_OK && rc != Z_STREAM_END)
+          throw Exception("Internal error in Gzip_Compression deflate.");
+        send(buffer.begin(), buffer.size() - zlib->stream.avail_out);
       }
-   }
+  }
 
-/*************************************************
-* Finish Compressing with Gzip                   *
-*************************************************/
-void Gzip_Compression::end_msg()
-   {
-   zlib->stream.next_in = 0;
-   zlib->stream.avail_in = 0;
+  /*************************************************
+  * Finish Compressing with Gzip                   *
+  *************************************************/
+  void Gzip_Compression::end_msg()
+  {
+    zlib->stream.next_in = 0;
+    zlib->stream.avail_in = 0;
 
-   int rc = Z_OK;
-   while(rc != Z_STREAM_END)
+    int rc = Z_OK;
+    while(rc != Z_STREAM_END)
       {
-      zlib->stream.next_out = (Bytef*)buffer.begin();
-      zlib->stream.avail_out = buffer.size();
-      rc = deflate(&(zlib->stream), Z_FINISH);
-      if (rc != Z_OK && rc != Z_STREAM_END)
-         throw Exception("Internal error in Gzip_Compression finishing deflate.");
-      send(buffer.begin(), buffer.size() - zlib->stream.avail_out);
+        zlib->stream.next_out = (Bytef *)buffer.begin();
+        zlib->stream.avail_out = buffer.size();
+        rc = deflate(&(zlib->stream), Z_FINISH);
+        if (rc != Z_OK && rc != Z_STREAM_END)
+          throw Exception("Internal error in Gzip_Compression finishing deflate.");
+        send(buffer.begin(), buffer.size() - zlib->stream.avail_out);
       }
 
-   pipe.end_msg();
-   put_footer();
-   clear();
-   }
+    pipe.end_msg();
+    put_footer();
+    clear();
+  }
 
-/*************************************************
-* Clean up Compression Context                   *
-*************************************************/
-void Gzip_Compression::clear()
-   {
-   deflateReset(&(zlib->stream));
-   }
+  /*************************************************
+  * Clean up Compression Context                   *
+  *************************************************/
+  void Gzip_Compression::clear()
+  {
+    deflateReset(&(zlib->stream));
+  }
 
-/*************************************************
-* Put a basic gzip header at the beginning       *
-*************************************************/
-void Gzip_Compression::put_header()
-   {
-   send(GZIP::GZIP_HEADER, sizeof(GZIP::GZIP_HEADER));
-   }
+  /*************************************************
+  * Put a basic gzip header at the beginning       *
+  *************************************************/
+  void Gzip_Compression::put_header()
+  {
+    send(GZIP::GZIP_HEADER, sizeof(GZIP::GZIP_HEADER));
+  }
 
-/*************************************************
-* Put a gzip footer at the end                   *
-*************************************************/
-void Gzip_Compression::put_footer()
-   {
-   // 4 byte CRC32, and 4 byte length field
-   SecureVector<byte> buf(4);
-   SecureVector<byte> tmpbuf(4);
+  /*************************************************
+  * Put a gzip footer at the end                   *
+  *************************************************/
+  void Gzip_Compression::put_footer()
+  {
+    // 4 byte CRC32, and 4 byte length field
+    SecureVector<byte> buf(4);
+    SecureVector<byte> tmpbuf(4);
 
-   pipe.read(tmpbuf.begin(), tmpbuf.size(), Pipe::LAST_MESSAGE);
+    pipe.read(tmpbuf.begin(), tmpbuf.size(), Pipe::LAST_MESSAGE);
 
-   // CRC32 is the reverse order to what gzip expects.
-   for (int i = 0; i < 4; i++)
+    // CRC32 is the reverse order to what gzip expects.
+    for (int i = 0; i < 4; i++)
       buf[3-i] = tmpbuf[i];
 
-   send(buf.begin(), buf.size());
+    send(buf.begin(), buf.size());
 
-   // Length - LSB first
-   for (int i = 0; i < 4; i++)
+    // Length - LSB first
+    for (int i = 0; i < 4; i++)
       buf[3-i] = get_byte(i, count);
 
-   send(buf.begin(), buf.size());
-   }
+    send(buf.begin(), buf.size());
+  }
 
-/*************************************************
-* Gzip_Decompression Constructor                 *
-*************************************************/
-Gzip_Decompression::Gzip_Decompression() : buffer(DEFAULT_BUFFERSIZE),
-   no_writes(true), pipe(new Hash_Filter("CRC32")), footer(0)
-   {
-   if (DEFAULT_BUFFERSIZE < sizeof(GZIP::GZIP_HEADER))
+  /*************************************************
+  * Gzip_Decompression Constructor                 *
+  *************************************************/
+  Gzip_Decompression::Gzip_Decompression() : buffer(DEFAULT_BUFFERSIZE),
+    no_writes(true), pipe(new Hash_Filter("CRC32")), footer(0)
+  {
+    if (DEFAULT_BUFFERSIZE < sizeof(GZIP::GZIP_HEADER))
       throw Exception("DEFAULT_BUFFERSIZE is too small");
 
-   zlib = new Zlib_Stream;
+    zlib = new Zlib_Stream;
 
-   // window_bits == -15 is raw zlib (no header) - see comment
-   // above about deflateInit2
-   if(inflateInit2(&(zlib->stream), -15) != Z_OK)
+    // window_bits == -15 is raw zlib (no header) - see comment
+    // above about deflateInit2
+    if(inflateInit2(&(zlib->stream), -15) != Z_OK)
       {
-      delete zlib; zlib = 0;
-      throw Exception("Gzip_Decompression: Memory allocation error");
+        delete zlib; zlib = 0;
+        throw Exception("Gzip_Decompression: Memory allocation error");
       }
-   }
+  }
 
-/*************************************************
-* Gzip_Decompression Destructor                  *
-*************************************************/
-Gzip_Decompression::~Gzip_Decompression()
-   {
-      inflateEnd(&(zlib->stream));
-      delete zlib; zlib = 0;
-   }
+  /*************************************************
+  * Gzip_Decompression Destructor                  *
+  *************************************************/
+  Gzip_Decompression::~Gzip_Decompression()
+  {
+    inflateEnd(&(zlib->stream));
+    delete zlib; zlib = 0;
+  }
 
-/*************************************************
-* Start Decompressing with Gzip                  *
-*************************************************/
-void Gzip_Decompression::start_msg()
-   {
-   if (!no_writes)
+  /*************************************************
+  * Start Decompressing with Gzip                  *
+  *************************************************/
+  void Gzip_Decompression::start_msg()
+  {
+    if (!no_writes)
       throw Exception("Gzip_Decompression: start_msg after already writing");
 
-   pipe.start_msg();
-   datacount = 0;
-   pos = 0;
-   in_footer = false;
-   }
+    pipe.start_msg();
+    datacount = 0;
+    pos = 0;
+    in_footer = false;
+  }
 
-/*************************************************
-* Decompress Input with Gzip                     *
-*************************************************/
-void Gzip_Decompression::write(const byte input[], u32bit length)
-   {
-   if(length) no_writes = false;
+  /*************************************************
+  * Decompress Input with Gzip                     *
+  *************************************************/
+  void Gzip_Decompression::write(const byte input[], u32bit length)
+  {
+    if(length) no_writes = false;
 
-   // If we're in the footer, take what we need, then go to the next block
-   if (in_footer)
+    // If we're in the footer, take what we need, then go to the next block
+    if (in_footer)
       {
-         u32bit eat_len = eat_footer(input, length);
-         input += eat_len;
-         length -= eat_len;
-         if (length == 0)
-            return;
+        u32bit eat_len = eat_footer(input, length);
+        input += eat_len;
+        length -= eat_len;
+        if (length == 0)
+          return;
       }
 
-   // Check the gzip header
-   if (pos < sizeof(GZIP::GZIP_HEADER))
+    // Check the gzip header
+    if (pos < sizeof(GZIP::GZIP_HEADER))
       {
-      u32bit len = std::min((u32bit)sizeof(GZIP::GZIP_HEADER)-pos, length);
-      u32bit cmplen = len;
-      // The last byte is the OS flag - we don't care about that
-      if (pos + len - 1 >= GZIP::HEADER_POS_OS)
-         cmplen--;
+        u32bit len = std::min((u32bit)sizeof(GZIP::GZIP_HEADER) - pos, length);
+        u32bit cmplen = len;
+        // The last byte is the OS flag - we don't care about that
+        if (pos + len - 1 >= GZIP::HEADER_POS_OS)
+          cmplen--;
 
-      if (std::memcmp(input, &GZIP::GZIP_HEADER[pos], cmplen) != 0)
-         {
-         throw Decoding_Error("Gzip_Decompression: Data integrity error in header");
-         }
-      input += len;
-      length -= len;
-      pos += len;
+        if (std::memcmp(input, &GZIP::GZIP_HEADER[pos], cmplen) != 0)
+          {
+            throw Decoding_Error("Gzip_Decompression: Data integrity error in header");
+          }
+        input += len;
+        length -= len;
+        pos += len;
       }
 
-   pos += length;
+    pos += length;
 
-   zlib->stream.next_in = (Bytef*)input;
-   zlib->stream.avail_in = length;
+    zlib->stream.next_in = (Bytef *)input;
+    zlib->stream.avail_in = length;
 
-   while(zlib->stream.avail_in != 0)
+    while(zlib->stream.avail_in != 0)
       {
-      zlib->stream.next_out = (Bytef*)buffer.begin();
-      zlib->stream.avail_out = buffer.size();
+        zlib->stream.next_out = (Bytef *)buffer.begin();
+        zlib->stream.avail_out = buffer.size();
 
-      int rc = inflate(&(zlib->stream), Z_SYNC_FLUSH);
-      if(rc != Z_OK && rc != Z_STREAM_END)
-         {
-         if(rc == Z_DATA_ERROR)
-            throw Decoding_Error("Gzip_Decompression: Data integrity error");
-         if(rc == Z_NEED_DICT)
-            throw Decoding_Error("Gzip_Decompression: Need preset dictionary");
-         if(rc == Z_MEM_ERROR)
-            throw Exception("Gzip_Decompression: Memory allocation error");
-         throw Exception("Gzip_Decompression: Unknown decompress error");
-         }
-      send(buffer.begin(), buffer.size() - zlib->stream.avail_out);
-      pipe.write(buffer.begin(), buffer.size() - zlib->stream.avail_out);
-      datacount += buffer.size() - zlib->stream.avail_out;
+        int rc = inflate(&(zlib->stream), Z_SYNC_FLUSH);
+        if(rc != Z_OK && rc != Z_STREAM_END)
+          {
+            if(rc == Z_DATA_ERROR)
+              throw Decoding_Error("Gzip_Decompression: Data integrity error");
+            if(rc == Z_NEED_DICT)
+              throw Decoding_Error("Gzip_Decompression: Need preset dictionary");
+            if(rc == Z_MEM_ERROR)
+              throw Exception("Gzip_Decompression: Memory allocation error");
+            throw Exception("Gzip_Decompression: Unknown decompress error");
+          }
+        send(buffer.begin(), buffer.size() - zlib->stream.avail_out);
+        pipe.write(buffer.begin(), buffer.size() - zlib->stream.avail_out);
+        datacount += buffer.size() - zlib->stream.avail_out;
 
-      // Reached the end - we now need to check the footer
-      if(rc == Z_STREAM_END)
-         {
-         u32bit read_from_block = length - zlib->stream.avail_in;
-         u32bit eat_len = eat_footer((Bytef*)input + read_from_block, zlib->stream.avail_in);
-         read_from_block += eat_len;
-         input += read_from_block;
-         length -= read_from_block;
-         zlib->stream.next_in = (Bytef*)input;
-         zlib->stream.avail_in = length;
-         }
+        // Reached the end - we now need to check the footer
+        if(rc == Z_STREAM_END)
+          {
+            u32bit read_from_block = length - zlib->stream.avail_in;
+            u32bit eat_len = eat_footer((Bytef *)input + read_from_block, zlib->stream.avail_in);
+            read_from_block += eat_len;
+            input += read_from_block;
+            length -= read_from_block;
+            zlib->stream.next_in = (Bytef *)input;
+            zlib->stream.avail_in = length;
+          }
       }
-   }
+  }
 
-/*************************************************
-* Store the footer bytes                         *
-*************************************************/
-u32bit Gzip_Decompression::eat_footer(const byte input[], u32bit length)
-   {
-      if (footer.size() >= GZIP::FOOTER_LENGTH)
-         throw Decoding_Error("Gzip_Decompression: Data integrity error in footer");
+  /*************************************************
+  * Store the footer bytes                         *
+  *************************************************/
+  u32bit Gzip_Decompression::eat_footer(const byte input[], u32bit length)
+  {
+    if (footer.size() >= GZIP::FOOTER_LENGTH)
+      throw Decoding_Error("Gzip_Decompression: Data integrity error in footer");
 
-      u32bit eat_len = std::min(GZIP::FOOTER_LENGTH-footer.size(), length);
-      footer.append(input, eat_len);
+    u32bit eat_len = std::min(GZIP::FOOTER_LENGTH - footer.size(), length);
+    footer.append(input, eat_len);
 
-      if (footer.size() == GZIP::FOOTER_LENGTH)
-         {
-         check_footer();
-         clear();
-         }
+    if (footer.size() == GZIP::FOOTER_LENGTH)
+      {
+        check_footer();
+        clear();
+      }
 
-         return eat_len;
-   }
+    return eat_len;
+  }
 
-/*************************************************
-* Check the gzip footer                          *
-*************************************************/
-void Gzip_Decompression::check_footer()
-   {
-   if (footer.size() != GZIP::FOOTER_LENGTH)
+  /*************************************************
+  * Check the gzip footer                          *
+  *************************************************/
+  void Gzip_Decompression::check_footer()
+  {
+    if (footer.size() != GZIP::FOOTER_LENGTH)
       throw Exception("Gzip_Decompression: Error finalizing decompression");
 
-   pipe.end_msg();
+    pipe.end_msg();
 
-   // 4 byte CRC32, and 4 byte length field
-   SecureVector<byte> buf(4);
-   SecureVector<byte> tmpbuf(4);
-   pipe.read(tmpbuf.begin(), tmpbuf.size(), Pipe::LAST_MESSAGE);
+    // 4 byte CRC32, and 4 byte length field
+    SecureVector<byte> buf(4);
+    SecureVector<byte> tmpbuf(4);
+    pipe.read(tmpbuf.begin(), tmpbuf.size(), Pipe::LAST_MESSAGE);
 
-  // CRC32 is the reverse order to what gzip expects.
-  for (int i = 0; i < 4; i++)
-     buf[3-i] = tmpbuf[i];
+    // CRC32 is the reverse order to what gzip expects.
+    for (int i = 0; i < 4; i++)
+      buf[3-i] = tmpbuf[i];
 
-  tmpbuf.set(footer.begin(), 4);
-  if (buf != tmpbuf)
+    tmpbuf.set(footer.begin(), 4);
+    if (buf != tmpbuf)
       throw Decoding_Error("Gzip_Decompression: Data integrity error - CRC32 error");
 
-   // Check the length matches - it is encoded LSB-first
-   for (int i = 0; i < 4; i++)
+    // Check the length matches - it is encoded LSB-first
+    for (int i = 0; i < 4; i++)
       {
-      if (footer.begin()[GZIP::FOOTER_LENGTH-1-i] != get_byte(i, datacount))
-         throw Decoding_Error("Gzip_Decompression: Data integrity error - incorrect length");
+        if (footer.begin()[GZIP::FOOTER_LENGTH-1-i] != get_byte(i, datacount))
+          throw Decoding_Error("Gzip_Decompression: Data integrity error - incorrect length");
       }
 
-   }
+  }
 
-/*************************************************
-* Finish Decompressing with Gzip                 *
-*************************************************/
-void Gzip_Decompression::end_msg()
-   {
+  /*************************************************
+  * Finish Decompressing with Gzip                 *
+  *************************************************/
+  void Gzip_Decompression::end_msg()
+  {
 
-   // All messages should end with a footer, and when a footer is successfully
-   // read, clear() will reset no_writes
-   if(no_writes) return;
+    // All messages should end with a footer, and when a footer is successfully
+    // read, clear() will reset no_writes
+    if(no_writes) return;
 
-   throw Exception("Gzip_Decompression: didn't find footer");
+    throw Exception("Gzip_Decompression: didn't find footer");
 
-   }
+  }
 
-/*************************************************
-* Clean up Decompression Context                 *
-*************************************************/
-void Gzip_Decompression::clear()
-   {
-   no_writes = true;
-   inflateReset(&(zlib->stream));
+  /*************************************************
+  * Clean up Decompression Context                 *
+  *************************************************/
+  void Gzip_Decompression::clear()
+  {
+    no_writes = true;
+    inflateReset(&(zlib->stream));
 
-   footer.destroy();
-   pos = 0;
-   datacount = 0;
-   }
+    footer.destroy();
+    pos = 0;
+    datacount = 0;
+  }
 
 }
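
put_footer() and check_footer() above build the standard 8-byte gzip trailer by hand: the CRC32 of the uncompressed data followed by its length, both stored least-significant byte first. A standalone sketch of that layout using plain zlib rather than Botan's Pipe/Hash_Filter machinery:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <zlib.h>

int main()
{
  const char data[] = "hello";
  std::uint32_t crc = crc32(0L, reinterpret_cast<const Bytef *>(data),
                            std::strlen(data));
  std::uint32_t isize = std::strlen(data);  // ISIZE: uncompressed length mod 2^32

  unsigned char footer[8];
  for (int i = 0; i < 4; i++)
    {
      footer[i]     = (crc   >> (8 * i)) & 0xff;  // CRC32, LSB first
      footer[4 + i] = (isize >> (8 * i)) & 0xff;  // length, LSB first
    }

  for (int i = 0; i < 8; i++)
    std::printf("%02x ", (unsigned)footer[i]);
  std::printf("\n");
  return 0;
}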
 
============================================================
--- src/gzip.hh	649dfc0b250954f39c0f73870f8bec3f32f7fa43
+++ src/gzip.hh	4a6632de2513393bf015f131abb768723708930a
@@ -10,76 +10,79 @@
 #include <botan/filter.h>
 #include <botan/pipe.h>
 
-namespace Botan {
+namespace Botan
+{
 
-namespace GZIP {
+  namespace GZIP
+  {
 
-   /* A basic header - we only need to set the IDs and compression method */
-   const byte GZIP_HEADER[] = {
+    /* A basic header - we only need to set the IDs and compression method */
+    const byte GZIP_HEADER[] =
+    {
       0x1f, 0x8b, /* Magic ID bytes */
       0x08, /* Compression method of 'deflate' */
       0x00, /* Flags all empty */
       0x00, 0x00, 0x00, 0x00, /* MTIME */
       0x00, /* Extra flags */
       0xff, /* Operating system (unknown) */
-   };
+    };
 
-   const unsigned int HEADER_POS_OS = 9;
+    const unsigned int HEADER_POS_OS = 9;
 
-   const unsigned int FOOTER_LENGTH = 8;
+    const unsigned int FOOTER_LENGTH = 8;
 
-}
+  }
 
-/*************************************************
-* Gzip Compression Filter                        *
-*************************************************/
-class Gzip_Compression : public Filter
-   {
-   public:
-      void write(const byte input[], u32bit length);
-      void start_msg();
-      void end_msg();
-      std::string name() const { return "Gzip_Compression"; }
+  /*************************************************
+  * Gzip Compression Filter                        *
+  *************************************************/
+  class Gzip_Compression : public Filter
+  {
+  public:
+    void write(const byte input[], u32bit length);
+    void start_msg();
+    void end_msg();
+    std::string name() const { return "Gzip_Compression"; }
 
-      Gzip_Compression(u32bit = 1);
-      ~Gzip_Compression();
-   private:
-      void clear();
-      void put_header();
-      void put_footer();
-      const u32bit level;
-      SecureVector<byte> buffer;
-      class Zlib_Stream* zlib;
-      Pipe pipe; /* A pipe for the crc32 processing */
-      u32bit count;
-   };
+    Gzip_Compression(u32bit = 1);
+    ~Gzip_Compression();
+  private:
+    void clear();
+    void put_header();
+    void put_footer();
+    const u32bit level;
+    SecureVector<byte> buffer;
+    class Zlib_Stream * zlib;
+    Pipe pipe; /* A pipe for the crc32 processing */
+    u32bit count;
+  };
 
-/*************************************************
-* Gzip Decompression Filter                      *
-*************************************************/
-class Gzip_Decompression : public Filter
-   {
-   public:
-      void write(const byte input[], u32bit length);
-      void start_msg();
-      void end_msg();
-      std::string name() const { return "Gzip_Decompression"; }
+  /*************************************************
+  * Gzip Decompression Filter                      *
+  *************************************************/
+  class Gzip_Decompression : public Filter
+  {
+  public:
+    void write(const byte input[], u32bit length);
+    void start_msg();
+    void end_msg();
+    std::string name() const { return "Gzip_Decompression"; }
 
-      Gzip_Decompression();
-      ~Gzip_Decompression();
-   private:
-      u32bit eat_footer(const byte input[], u32bit length);
-      void check_footer();
-      void clear();
-      SecureVector<byte> buffer;
-      class Zlib_Stream* zlib;
-      bool no_writes;
-      u32bit pos; /* Current position in the message */
-      Pipe pipe; /* A pipe for the crc32 processing */
-      u32bit datacount; /* Amount of uncompressed output */
-      SecureVector<byte> footer;
-      bool in_footer;
-   };
+    Gzip_Decompression();
+    ~Gzip_Decompression();
+  private:
+    u32bit eat_footer(const byte input[], u32bit length);
+    void check_footer();
+    void clear();
+    SecureVector<byte> buffer;
+    class Zlib_Stream * zlib;
+    bool no_writes;
+    u32bit pos; /* Current position in the message */
+    Pipe pipe; /* A pipe for the crc32 processing */
+    u32bit datacount; /* Amount of uncompressed output */
+    SecureVector<byte> footer;
+    bool in_footer;
+  };
 
 }
 
============================================================
--- src/char_classifiers.hh	380ead182576526e2343bd873b19114770015979
+++ src/char_classifiers.hh	0cead355dc925eed5f29289e1823562254206c09
@@ -44,11 +44,11 @@ inline bool is_space(char x)
 inline bool is_space(char x)
 {
   return (x == ' ')
-    || (x == '\n')
-    || (x == '\t')
-    || (x == '\r')
-    || (x == '\v')
-    || (x == '\f');
+         || (x == '\n')
+         || (x == '\t')
+         || (x == '\r')
+         || (x == '\v')
+         || (x == '\f');
 }
 
 inline bool is_upper(char x)
============================================================
--- src/automate_ostream.hh	33895c7883416f107f9c0174fb16c16b1b1db81d
+++ src/automate_ostream.hh	cf84e9fef2749917851238c5a016bf04da518e6f
@@ -17,7 +17,7 @@ using boost::lexical_cast;
 
 using boost::lexical_cast;
 
-template<typename _CharT, typename _Traits = std::char_traits<_CharT> >
+template < typename _CharT, typename _Traits = std::char_traits<_CharT> >
 class basic_automate_streambuf : public std::basic_streambuf<_CharT, _Traits>
 {
   typedef _Traits traits_type;
@@ -30,7 +30,7 @@ public:
   basic_automate_streambuf(std::ostream & o, size_t bufsize)
     : std::basic_streambuf<_CharT, _Traits>(), _bufsize(bufsize), out(&o), cmdnum(0)
   {
-    _CharT *inbuf = new _CharT[_bufsize];
+    _CharT * inbuf = new _CharT[_bufsize];
     this->setp(inbuf, inbuf + _bufsize);
   }
 
@@ -66,7 +66,7 @@ public:
         (*out) << cmdnum << ':'
                << 'm' << ':'
                << num << ':'
-               << std::basic_string<_CharT,_Traits>(this->pbase(), num);
+               << std::basic_string<_CharT, _Traits>(this->pbase(), num);
         this->setp(this->pbase(), this->pbase() + _bufsize);
         out->flush();
       }
@@ -77,17 +77,18 @@ public:
     unsigned chunksize = _bufsize;
     size_t length = data.size(), offset = 0;
     do
-    {
-      if (offset+chunksize>length)
-        chunksize = length-offset;
-      (*out) << cmdnum << ':' << type << ':' << chunksize
-             << ':' << data.substr(offset, chunksize);
-      offset+= chunksize;
-    } while (offset<length);
+      {
+        if (offset + chunksize > length)
+          chunksize = length - offset;
+        (*out) << cmdnum << ':' << type << ':' << chunksize
+               << ':' << data.substr(offset, chunksize);
+        offset += chunksize;
+      }
+    while (offset < length);
     out->flush();
   }
 
-  void write_headers(std::vector<std::pair<std::string,std::string> > const & headers)
+  void write_headers(std::vector<std::pair<std::string, std::string> > const & headers)
   {
     for (std::vector<std::pair<std::string, std::string> >::const_iterator h = headers.begin();
          h != headers.end(); ++h)
@@ -107,17 +108,17 @@ public:
   }
 };
 
-template<typename _CharT, typename _Traits = std::char_traits<_CharT> >
+template < typename _CharT, typename _Traits = std::char_traits<_CharT> >
 struct basic_automate_ostream : public std::basic_ostream<_CharT, _Traits>
 {
   typedef basic_automate_streambuf<_CharT, _Traits> streambuf_type;
   streambuf_type _M_autobuf;
 
   basic_automate_ostream(std::basic_ostream<_CharT, _Traits> &out,
-                   size_t blocksize)
+                         size_t blocksize)
     : std::basic_ostream<_CharT, _Traits>(&_M_autobuf),
       _M_autobuf(out, blocksize)
-  { /* this->init(&_M_autobuf); */ }
+{ /* this->init(&_M_autobuf); */ }
 
 protected:
   basic_automate_ostream() { }
@@ -136,7 +137,7 @@ public:
   virtual void write_out_of_band(char type, std::string const & data)
   { _M_autobuf.write_out_of_band(type, data); }
 
-  virtual void write_headers(std::vector<std::pair<std::string,std::string> > const & headers)
+  virtual void write_headers(std::vector<std::pair<std::string, std::string> > const & headers)
   { _M_autobuf.write_headers(headers); }
 };
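
write_out_of_band() above frames its payload as a sequence of <cmdnum>:<type>:<size>:<data> chunks of at most bufsize bytes each. A standalone sketch of that framing; the free function name here is illustrative, not monotone's API:

#include <cstddef>
#include <iostream>
#include <string>

// split data into chunks and prefix each with "cmdnum:type:size:"
static void write_chunked(std::ostream & out, int cmdnum, char type,
                          std::string const & data, std::size_t bufsize)
{
  std::size_t length = data.size(), offset = 0, chunksize = bufsize;
  do
    {
      if (offset + chunksize > length)
        chunksize = length - offset;
      out << cmdnum << ':' << type << ':' << chunksize
          << ':' << data.substr(offset, chunksize);
      offset += chunksize;
    }
  while (offset < length);
}

int main()
{
  write_chunked(std::cout, 0, 'm', "hello world", 4);
  std::cout << '\n';
  // prints: 0:m:4:hell0:m:4:o wo0:m:3:rld
  return 0;
}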
 
============================================================
--- src/cmd_conflicts.cc	ad9dc74c27d4d8c50a74e5825ec9e0a12cdc1b09
+++ src/cmd_conflicts.cc	41cf02819e27393713a9bae4fbd57f6776cae3d5
@@ -40,11 +40,11 @@ struct conflicts_t
   };
 
   void write (database & db, lua_hooks & lua, bookkeeping_path const & file)
-    {
-      result.write_conflict_file
-        (db, lua, file, ancestor_rid, left_rid, right_rid,
-         left_roster, left_marking, right_roster, right_marking);
-    };
+  {
+    result.write_conflict_file
+    (db, lua, file, ancestor_rid, left_rid, right_rid,
+     left_roster, left_marking, right_roster, right_marking);
+  };
 };
 
 typedef enum {first, remaining} show_conflicts_case_t;
@@ -154,45 +154,45 @@ show_conflicts(database & db, conflicts_
   switch (show_case)
     {
     case first:
-      {
-        int const count = conflicts.result.count_unsupported_resolution();
-        if (count > 0)
-            P(FP("warning: %d conflict with no supported resolutions.",
-                 "warning: %d conflicts with no supported resolutions.",
-                 count) % count);
-        else
-          P(F("all conflicts resolved"));
-      }
-      break;
+    {
+      int const count = conflicts.result.count_unsupported_resolution();
+      if (count > 0)
+        P(FP("warning: %d conflict with no supported resolutions.",
+             "warning: %d conflicts with no supported resolutions.",
+             count) % count);
+      else
+        P(F("all conflicts resolved"));
+    }
+    break;
 
     case remaining:
-      {
-        int const count = conflicts.result.count_unsupported_resolution();
-        if (count > 0)
-          {
-            P(FP("warning: %d conflict with no supported resolutions.",
-                 "warning: %d conflicts with no supported resolutions.",
-                 count) % count);
+    {
+      int const count = conflicts.result.count_unsupported_resolution();
+      if (count > 0)
+        {
+          P(FP("warning: %d conflict with no supported resolutions.",
+               "warning: %d conflicts with no supported resolutions.",
+               count) % count);
 
-            content_merge_database_adaptor adaptor
-              (db, conflicts.left_rid, conflicts.right_rid, conflicts.left_marking, conflicts.right_marking);
+          content_merge_database_adaptor adaptor
+          (db, conflicts.left_rid, conflicts.right_rid, conflicts.left_marking, conflicts.right_marking);
 
-            conflicts.result.report_missing_root_conflicts
-              (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
-            conflicts.result.report_invalid_name_conflicts
-              (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
-            conflicts.result.report_directory_loop_conflicts
-              (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
-            conflicts.result.report_orphaned_node_conflicts
-              (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
-            conflicts.result.report_multiple_name_conflicts
-              (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
-            conflicts.result.report_attribute_conflicts
-              (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
-          }
-      }
-      break;
+          conflicts.result.report_missing_root_conflicts
+          (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
+          conflicts.result.report_invalid_name_conflicts
+          (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
+          conflicts.result.report_directory_loop_conflicts
+          (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
+          conflicts.result.report_orphaned_node_conflicts
+          (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
+          conflicts.result.report_multiple_name_conflicts
+          (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
+          conflicts.result.report_attribute_conflicts
+          (*conflicts.left_roster, *conflicts.right_roster, adaptor, false, std::cout);
+        }
     }
+    break;
+    }
 
 } // show_conflicts
 
@@ -264,7 +264,7 @@ set_duplicate_name_conflict(resolve_conf
     {
       E(args.size() == 2, origin::user, F("wrong number of arguments"));
       resolution.first  = resolve_conflicts::rename;
-      resolution.second = resolve_conflicts::new_file_path(idx(args,1)());
+      resolution.second = resolve_conflicts::new_file_path(idx(args, 1)());
     }
   else if ("user" == idx(args, 0)())
     {
@@ -276,11 +276,11 @@ set_duplicate_name_conflict(resolve_conf
         F("other resolution must be 'drop' or 'rename'"));
 
       resolution.first  = resolve_conflicts::content_user;
-      resolution.second = new_optimal_path(idx(args,1)(), false);
+      resolution.second = new_optimal_path(idx(args, 1)(), false);
     }
   else
     E(false, origin::user,
-      F(conflict_resolution_not_supported_msg) % idx(args,0) % "duplicate_name");
+      F(conflict_resolution_not_supported_msg) % idx(args, 0) % "duplicate_name");
 
 } //set_duplicate_name_conflict
 
@@ -333,23 +333,23 @@ set_first_conflict(database & db,
 
           if (conflict.resolution.first == resolve_conflicts::none)
             {
-              if ("drop" == idx(args,0)())
+              if ("drop" == idx(args, 0)())
                 {
                   E(args.size() == 1, origin::user, F("wrong number of arguments"));
 
                   conflict.resolution.first  = resolve_conflicts::drop;
                 }
-              else if ("rename" == idx(args,0)())
+              else if ("rename" == idx(args, 0)())
                 {
                   E(args.size() == 2, origin::user, F("wrong number of arguments"));
 
                   conflict.resolution.first  = resolve_conflicts::rename;
-                  conflict.resolution.second = new_optimal_path(idx(args,1)(), false);
+                  conflict.resolution.second = new_optimal_path(idx(args, 1)(), false);
                 }
               else
                 {
                   E(false, origin::user,
-                    F(conflict_resolution_not_supported_msg) % idx(args,0) % "orphaned_node");
+                    F(conflict_resolution_not_supported_msg) % idx(args, 0) % "orphaned_node");
                 }
               return;
             }
@@ -363,7 +363,7 @@ set_first_conflict(database & db,
 
           if (conflict.resolution.first == resolve_conflicts::none)
             {
-              if ("interactive" == idx(args,0)())
+              if ("interactive" == idx(args, 0)())
                 {
                   bookkeeping_path result_path;
 
@@ -371,23 +371,23 @@ set_first_conflict(database & db,
                     {
                     case 1:
                       // use default path for resolution file
-                      {
-                        file_path left_path;
-                        conflicts.left_roster->get_name(conflict.nid, left_path);
-                        result_path = bookkeeping_resolutions_dir / left_path;
-                      }
-                      break;
+                    {
+                      file_path left_path;
+                      conflicts.left_roster->get_name(conflict.nid, left_path);
+                      result_path = bookkeeping_resolutions_dir / left_path;
+                    }
+                    break;
 
                     case 2:
                       // user path for resolution file
-                      {
-                        string normalized;
-                        normalize_external_path(idx(args,1)(),
-                                                normalized,
-                                                false); // to_workspace_root
-                        result_path = bookkeeping_path(normalized, origin::user);
-                      }
-                      break;
+                    {
+                      string normalized;
+                      normalize_external_path(idx(args, 1)(),
+                                              normalized,
+                                              false); // to_workspace_root
+                      result_path = bookkeeping_path(normalized, origin::user);
+                    }
+                    break;
 
                     default:
                       E(false, origin::user, F("wrong number of arguments"));
@@ -403,19 +403,19 @@ set_first_conflict(database & db,
                   else
                     P(F("interactive merge failed."));
                 }
-              else if ("user" == idx(args,0)())
+              else if ("user" == idx(args, 0)())
                 {
                   E(args.size() == 2, origin::user, F("wrong number of arguments"));
 
                   conflict.resolution.first  = resolve_conflicts::content_user;
-                  conflict.resolution.second = new_optimal_path(idx(args,1)(), false);
+                  conflict.resolution.second = new_optimal_path(idx(args, 1)(), false);
                 }
               else
                 {
                   // We don't allow the user to specify 'resolved_internal'; that
                   // is only done by automate show_conflicts.
                   E(false, origin::user,
-                    F(conflict_resolution_not_supported_msg) % idx(args,0) % "file_content");
+                    F(conflict_resolution_not_supported_msg) % idx(args, 0) % "file_content");
                 }
               return;
             }
============================================================
--- src/origin_type.hh	0e329b2b10d081f52a8346044a6522a9fdc68552
+++ src/origin_type.hh	60e015b70df9ab47c92125245b2d57f26a7560d1
@@ -11,8 +11,10 @@
 #define __ORIGIN_TYPE_HH__
 
 // sanity.cc:type_to_string(type t) will need to match this
-namespace origin {
-  enum type {
+namespace origin
+{
+  enum type
+  {
     internal,
     network,
     database,
============================================================
--- src/cmd.cc	47891700f02feb1a6278c175fc56f5253c91ea49
+++ src/cmd.cc	43998ec9a6a25f9535dbecf3c777a0462829aa44
@@ -99,7 +99,8 @@ CMD_GROUP(user, "user", "", CMD_REF(__ro
           N_("Commands defined by the user"),
           "");
 
-namespace commands {
+namespace commands
+{
 
   void remove_command_name_from_args(command_id const & ident,
                                      args_vector & args,
@@ -131,7 +132,7 @@ namespace commands {
     cmd->preset_options(app.opts);
 
     option::concrete_option_set optset
-      = (options::opts::globals() | cmd->opts())
+    = (options::opts::globals() | cmd->opts())
       .instantiate(&app.opts);
 
     optset.from_command_line(app.reset_info.default_args);
@@ -142,8 +143,8 @@ namespace commands {
         app.lua.hook_get_default_command_options(subcmd_full_ident,
                                                  subcmd_defaults);
         (options::opts::globals() | subcmd->opts())
-          .instantiate(&app.opts)
-          .from_command_line(subcmd_defaults);
+        .instantiate(&app.opts)
+        .from_command_line(subcmd_defaults);
       }
 
     // at this point we process the data from _MTN/options if
@@ -160,7 +161,7 @@ namespace commands {
       {
         app.opts.args.clear();
         option::concrete_option_set subcmd_optset
-          = (options::opts::globals() | subcmd->opts())
+        = (options::opts::globals() | subcmd->opts())
           .instantiate(&app.opts);
         if (!separate_params)
           {
@@ -275,7 +276,7 @@ namespace commands {
           continue;
 
         size_t len = display_width(join_words(child->names(), ", ")) +
-            display_width(utf8("    "));
+                     display_width(utf8("    "));
         if (colabstract < len)
           colabstract = len;
 
@@ -391,14 +392,14 @@ namespace commands {
                          out);
         out << '\n'
             << format_text(F("For information on a specific command, type "
-                           "'mtn help <command_name> [subcommand_name ...]'."))
+                             "'mtn help <command_name> [subcommand_name ...]'."))
             << "\n\n"
             << format_text(F("To see more details about the commands of a "
-                           "particular group, type 'mtn help <group_name>'."))
+                             "particular group, type 'mtn help <group_name>'."))
             << "\n\n"
             << format_text(F("Note that you can always abbreviate a command "
-                           "name as long as it does not conflict with other "
-                           "names."))
+                             "name as long as it does not conflict with other "
+                             "names."))
             << "\n";
       }
     else
@@ -414,19 +415,19 @@ namespace commands {
   // Lua-defined user commands.
   class cmd_lua : public command
   {
-    lua_State *st;
+    lua_State * st;
     std::string const f_name;
   public:
     cmd_lua(std::string const & primary_name,
-                   std::string const & params,
-                   std::string const & abstract,
-                   std::string const & desc,
-                   lua_State *L_st,
-                   std::string const & func_name) :
-         command(primary_name, "", CMD_REF(user), false, false, params,
-                 abstract, desc, true,
-                 options::options_type() | options::opts::none, true),
-                 st(L_st), f_name(func_name)
+            std::string const & params,
+            std::string const & abstract,
+            std::string const & desc,
+            lua_State * L_st,
+            std::string const & func_name) :
+      command(primary_name, "", CMD_REF(user), false, false, params,
+              abstract, desc, true,
+              options::options_type() | options::opts::none, true),
+      st(L_st), f_name(func_name)
     {
       // because user commands are inserted after the normal
       // initialisation process
@@ -439,7 +440,7 @@ namespace commands {
       I(st);
       I(app.lua.check_lua_state(st));
 
-      app_state* app_p = get_app_state(st);
+      app_state * app_p = get_app_state(st);
       I(app_p == & app);
 
       Lua ll(st);
@@ -451,7 +452,7 @@ namespace commands {
 
       app.mtn_automate_allowed = true;
 
-      ll.call(args.size(),0);
+      ll.call(args.size(), 0);
 
       app.mtn_automate_allowed = false;
 
@@ -464,15 +465,15 @@ LUAEXT(alias_command, )
 
 LUAEXT(alias_command, )
 {
-  const char *old_cmd = luaL_checkstring(LS, -2);
-  const char *new_cmd = luaL_checkstring(LS, -1);
+  const char * old_cmd = luaL_checkstring(LS, -2);
+  const char * new_cmd = luaL_checkstring(LS, -1);
   E(old_cmd && new_cmd, origin::user,
     F("'%s' called with an invalid parameter") % "alias_command");
 
   args_vector args;
   args.push_back(arg_type(old_cmd, origin::user));
   commands::command_id id = commands::complete_command(args);
-  commands::command *old_cmd_p = CMD_REF(__root__)->find_command(id);
+  commands::command * old_cmd_p = CMD_REF(__root__)->find_command(id);
 
   old_cmd_p->add_alias(utf8(new_cmd));
 
@@ -483,11 +484,11 @@ LUAEXT(register_command, )
 
 LUAEXT(register_command, )
 {
-  const char *cmd_name = luaL_checkstring(LS, -5);
-  const char *cmd_params = luaL_checkstring(LS, -4);
-  const char *cmd_abstract = luaL_checkstring(LS, -3);
-  const char *cmd_desc = luaL_checkstring(LS, -2);
-  const char *cmd_func = luaL_checkstring(LS, -1);
+  const char * cmd_name = luaL_checkstring(LS, -5);
+  const char * cmd_params = luaL_checkstring(LS, -4);
+  const char * cmd_abstract = luaL_checkstring(LS, -3);
+  const char * cmd_desc = luaL_checkstring(LS, -2);
+  const char * cmd_func = luaL_checkstring(LS, -1);
 
   E(cmd_name && cmd_params && cmd_abstract && cmd_desc && cmd_func,
     origin::user,
@@ -506,9 +507,9 @@ CMD_NO_WORKSPACE(help, "help", "", CMD_R
 
 CMD_NO_WORKSPACE(help, "help", "", CMD_REF(informative),
                  N_("command [ARGS...]"),
-    N_("Displays help about commands and options"),
-    "",
-    options::opts::show_hidden_commands)
+                 N_("Displays help about commands and options"),
+                 "",
+                 options::opts::show_hidden_commands)
 {
   if (args.size() < 1)
     {
@@ -522,9 +523,9 @@ CMD_NO_WORKSPACE(version, "version", "",
 }
 
 CMD_NO_WORKSPACE(version, "version", "", CMD_REF(informative), "",
-    N_("Shows the program version"),
-    "",
-    options::opts::full)
+                 N_("Shows the program version"),
+                 "",
+                 options::opts::full)
 {
   E(args.empty(), origin::user,
     F("no arguments allowed"));
@@ -541,8 +542,8 @@ CMD_HIDDEN(check_glob, "check_glob", "",
            "",
            options::opts::none)
 {
-  globish g = typecast_vocab<globish>(idx(args,0));
-  string s(idx(args,1)());
+  globish g = typecast_vocab<globish>(idx(args, 0));
+  string s(idx(args, 1)());
 
   E(g.matches(s), origin::user,
     F("Glob '%s' does not match string '%s'") % g % s);
@@ -557,15 +558,15 @@ CMD_HIDDEN(crash, "crash", "", CMD_REF(d
   if (args.size() != 1)
     throw usage(execid);
   bool spoon_exists(false);
-  if (idx(args,0)() == "N")
+  if (idx(args, 0)() == "N")
     E(spoon_exists, origin::user, i18n_format("There is no spoon."));
-  else if (idx(args,0)() == "E")
+  else if (idx(args, 0)() == "E")
     E(spoon_exists, origin::system, i18n_format("There is no spoon."));
-  else if (idx(args,0)() == "I")
+  else if (idx(args, 0)() == "I")
     {
       I(spoon_exists);
     }
-  else if (idx(args,0)() == "double-throw")
+  else if (idx(args, 0)() == "double-throw")
     {
       // This code is rather picky, for example I(false) in the destructor
       // won't always work like it should; see http://bugs.debian.org/516862
@@ -602,7 +603,7 @@ CMD_HIDDEN(crash, "crash", "", CMD_REF(d
 #ifndef _WIN32
       try
         {
-          int signo = boost::lexical_cast<int>(idx(args,0)());
+          int signo = boost::lexical_cast<int>(idx(args, 0)());
           if (0 < signo && signo <= 15)
             {
               raise(signo);
@@ -610,8 +611,9 @@ CMD_HIDDEN(crash, "crash", "", CMD_REF(d
               I(!"crash: raise returned");
             }
         }
-      catch (boost::bad_lexical_cast&)
-        { // fall through and throw usage
+      catch (boost::bad_lexical_cast &)
+        {
+          // fall through and throw usage
         }
 #endif
       throw usage(execid);
@@ -680,7 +682,7 @@ man_definition(vector<string> const & la
     }
   out += man_hyphens(content);
   if (content.rfind('\n') != (content.size() - 1))
-     out += "\n";
+    out += "\n";
 
   return out;
 }
@@ -714,7 +716,7 @@ man_title(string const & title)
 static string
 man_title(string const & title)
 {
-  return ".TH \"" + title + "\" 1 "+
+  return ".TH \"" + title + "\" 1 " +
          "\"" + BUILD_DATE + "\" " +
          "\"" + PACKAGE_STRING + "\"\n";
 }
@@ -809,7 +811,7 @@ get_commands(options & opts, commands::c
         {
           vector<string> full_ident;
           for (vector<utf8>::const_iterator j = main_ident.begin() + 1;
-                j < main_ident.end() - 1;  ++j)
+               j < main_ident.end() - 1;  ++j)
             {
               full_ident.push_back((*j)());
             }
@@ -888,8 +890,8 @@ get_command_groups(options & opts)
     {
       commands::command const * group = *i;
       out += man_subsection(
-        (F("command group '%s'") % group->primary_name()).str()
-      );
+               (F("command group '%s'") % group->primary_name()).str()
+             );
       out += group->desc() + "\n";
 
       out += get_commands(opts, group);
@@ -900,12 +902,12 @@ CMD_PRESET_OPTIONS(manpage)
 
 CMD_PRESET_OPTIONS(manpage)
 {
-    opts.formatted = isatty(STDOUT_FILENO);
+  opts.formatted = isatty(STDOUT_FILENO);
 }
 CMD_NO_WORKSPACE(manpage, "manpage", "", CMD_REF(informative), "",
-    N_("Generate a manual page from monotone's command help"),
-    "",
-    options::opts::show_hidden_commands | options::opts::formatted)
+                 N_("Generate a manual page from monotone's command help"),
+                 "",
+                 options::opts::show_hidden_commands | options::opts::formatted)
 {
   stringstream ss;
   ss << man_title("monotone");
@@ -925,11 +927,11 @@ CMD_NO_WORKSPACE(manpage, "manpage", "",
           "interface for scripting purposes and thorough documentation.")
      << "\n\n";
   ss << (F("For more information on monotone, visit %s.")
-          % man_bold(PACKAGE_URL)).str()
+         % man_bold(PACKAGE_URL)).str()
      << "\n\n";
   ss << (F("The complete documentation, including a tutorial for a quick start "
            "with the system, can be found online on %s.")
-          % man_bold(PACKAGE_URL "/docs")).str() << "\n";
+         % man_bold(PACKAGE_URL "/docs")).str() << "\n";
 
   ss << man_section(_("Global Options"));
   ss << get_options_string(options::opts::globals(), app.opts, 25) << "\n";
@@ -939,11 +941,11 @@ CMD_NO_WORKSPACE(manpage, "manpage", "",
 
   ss << man_section(_("See Also"));
   ss << (F("info %s and the documentation on %s")
-          % prog_name % man_bold(PACKAGE_URL "/docs")).str() << "\n";
+         % prog_name % man_bold(PACKAGE_URL "/docs")).str() << "\n";
 
   ss << man_section(_("Bugs"));
   ss << (F("Please report bugs to %s.")
-          % man_bold(PACKAGE_BUGREPORT)).str()<< "\n";
+         % man_bold(PACKAGE_BUGREPORT)).str() << "\n";
 
   ss << man_section(_("Authors"));
   ss << _("monotone was written originally by Graydon Hoare "
@@ -957,7 +959,7 @@ CMD_NO_WORKSPACE(manpage, "manpage", "",
   ss << man_section(_("Copyright"));
   ss << (F("monotone and this man page is Copyright (c) 2003 \\- %s by "
            "the monotone development team.")
-           % string(BUILD_DATE).substr(0, 4)).str() << "\n";
+         % string(BUILD_DATE).substr(0, 4)).str() << "\n";
 
   if (!app.opts.formatted)
     {
@@ -972,7 +974,7 @@ CMD_NO_WORKSPACE(manpage, "manpage", "",
   FILE * fp = popen(cmd.c_str(), "w");
   E(fp != NULL, origin::system,
     F("could not execute man page formatter command '%s': %s")
-      % cmd % strerror(errno));
+    % cmd % strerror(errno));
 
   fprintf(fp, ss.str().c_str());
   pclose(fp);
============================================================
--- src/migrate_ancestry.cc	e193114e2881198a24c5d7ed9a7ad8742aa0f191
+++ src/migrate_ancestry.cc	007f5268fed64add67cd6943a9d970676f370bc6
@@ -121,61 +121,62 @@ is_ancestor(database & db,
   return is_ancestor(ancestor_id, descendent_id, graph);
 }
 
-namespace {
+namespace
+{
 
-struct anc_graph
-{
-  anc_graph(bool existing, database & db, key_store & keys,
-            project_t & project) :
-    existing_graph(existing),
-    db(db),
-    keys(keys),
-    project(project),
-    max_node(0),
-    n_nodes("nodes", "n", 1),
-    n_certs_in("certs in", "c", 1),
-    n_revs_out("revs out", "r", 1),
-    n_certs_out("certs out", "C", 1)
-  {}
+  struct anc_graph
+  {
+    anc_graph(bool existing, database & db, key_store & keys,
+              project_t & project) :
+      existing_graph(existing),
+      db(db),
+      keys(keys),
+      project(project),
+      max_node(0),
+      n_nodes("nodes", "n", 1),
+      n_certs_in("certs in", "c", 1),
+      n_revs_out("revs out", "r", 1),
+      n_certs_out("certs out", "C", 1)
+    {}
 
-  bool existing_graph;
-  database & db;
-  key_store & keys;
-  project_t & project;
-  u64 max_node;
+    bool existing_graph;
+    database & db;
+    key_store & keys;
+    project_t & project;
+    u64 max_node;
 
-  ticker n_nodes;
-  ticker n_certs_in;
-  ticker n_revs_out;
-  ticker n_certs_out;
+    ticker n_nodes;
+    ticker n_certs_in;
+    ticker n_revs_out;
+    ticker n_certs_out;
 
-  map<u64,manifest_id> node_to_old_man;
-  map<manifest_id,u64> old_man_to_node;
+    map<u64, manifest_id> node_to_old_man;
+    map<manifest_id, u64> old_man_to_node;
 
-  map<u64,revision_id> node_to_old_rev;
-  map<revision_id,u64> old_rev_to_node;
+    map<u64, revision_id> node_to_old_rev;
+    map<revision_id, u64> old_rev_to_node;
 
-  map<u64,revision_id> node_to_new_rev;
-  map<revision_id,u64> new_rev_to_node;
+    map<u64, revision_id> node_to_new_rev;
+    map<revision_id, u64> new_rev_to_node;
 
-  map<u64, legacy::renames_map> node_to_renames;
+    map<u64, legacy::renames_map> node_to_renames;
 
-  multimap<u64, pair<cert_name, cert_value> > certs;
-  multimap<u64, u64> ancestry;
-  set<string> branches;
+    multimap<u64, pair<cert_name, cert_value> > certs;
+    multimap<u64, u64> ancestry;
+    set<string> branches;
 
-  void add_node_ancestry(u64 child, u64 parent);
-  void write_certs();
-  void kluge_for_bogus_merge_edges();
-  void rebuild_ancestry(set<string> const & attrs_to_drop);
-  void get_node_manifest(u64 node, manifest_id & man);
-  u64 add_node_for_old_manifest(manifest_id const & man);
-  u64 add_node_for_oldstyle_revision(revision_id const & rev);
-  void construct_revisions_from_ancestry(set<string> const & attrs_to_drop);
-  void fixup_node_identities(parent_roster_map const & parent_rosters,
-                             roster_t & child_roster,
-                             legacy::renames_map const & renames);
-};
+    void add_node_ancestry(u64 child, u64 parent);
+    void write_certs();
+    void kluge_for_bogus_merge_edges();
+    void rebuild_ancestry(set<string> const & attrs_to_drop);
+    void get_node_manifest(u64 node, manifest_id & man);
+    u64 add_node_for_old_manifest(manifest_id const & man);
+    u64 add_node_for_oldstyle_revision(revision_id const & rev);
+    void construct_revisions_from_ancestry(set<string> const & attrs_to_drop);
+    void fixup_node_identities(parent_roster_map const & parent_rosters,
+                               roster_t & child_roster,
+                               legacy::renames_map const & renames);
+  };
 
 }
 
@@ -187,7 +188,7 @@ void anc_graph::get_node_manifest(u64 no
 
 void anc_graph::get_node_manifest(u64 node, manifest_id & man)
 {
-  map<u64,manifest_id>::const_iterator i = node_to_old_man.find(node);
+  map<u64, manifest_id>::const_iterator i = node_to_old_man.find(node);
   I(i != node_to_old_man.end());
   man = i->second;
 }
@@ -202,7 +203,7 @@ void anc_graph::write_certs()
         char buf[constants::epochlen_bytes];
 #if BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,7,7)
         lazy_rng::get().randomize(reinterpret_cast<Botan::byte *>(buf),
-                                 constants::epochlen_bytes);
+                                  constants::epochlen_bytes);
 #else
         Botan::Global_RNG::randomize(reinterpret_cast<Botan::byte *>(buf),
                                      constants::epochlen_bytes);
@@ -218,12 +219,12 @@ void anc_graph::write_certs()
 
   typedef multimap<u64, pair<cert_name, cert_value> >::const_iterator ci;
 
-  for (map<u64,revision_id>::const_iterator i = node_to_new_rev.begin();
+  for (map<u64, revision_id>::const_iterator i = node_to_new_rev.begin();
        i != node_to_new_rev.end(); ++i)
     {
       revision_id rev(i->second);
 
-      pair<ci,ci> range = certs.equal_range(i->first);
+      pair<ci, ci> range = certs.equal_range(i->first);
 
       for (ci j = range.first; j != range.second; ++j)
         {
@@ -260,10 +261,10 @@ anc_graph::kluge_for_bogus_merge_edges()
 
   P(F("scanning for bogus merge edges"));
 
-  multimap<u64,u64> parent_to_child_map;
-    for (multimap<u64, u64>::const_iterator i = ancestry.begin();
-         i != ancestry.end(); ++i)
-      parent_to_child_map.insert(make_pair(i->second, i->first));
+  multimap<u64, u64> parent_to_child_map;
+  for (multimap<u64, u64>::const_iterator i = ancestry.begin();
+       i != ancestry.end(); ++i)
+    parent_to_child_map.insert(make_pair(i->second, i->first));
 
   map<u64, u64> edges_to_kill;
   for (multimap<u64, u64>::const_iterator i = ancestry.begin();
@@ -462,7 +463,7 @@ not_dead_yet(node_id nid, u64 birth_rev,
                   return false;
                 }
               typedef multimap<u64, u64>::const_iterator ci;
-              pair<ci,ci> range = child_to_parents.equal_range(curr);
+              pair<ci, ci> range = child_to_parents.equal_range(curr);
               for (ci i = range.first; i != range.second; ++i)
                 {
                   if (i->first != curr)
@@ -613,7 +614,7 @@ struct
 }
 
 struct
-current_rev_debugger
+  current_rev_debugger
 {
   u64 node;
   anc_graph const & agraph;
@@ -627,7 +628,7 @@ dump(current_rev_debugger const & d, str
 dump(current_rev_debugger const & d, string & out)
 {
   typedef multimap<u64, pair<cert_name, cert_value> >::const_iterator ci;
-  pair<ci,ci> range = d.agraph.certs.equal_range(d.node);
+  pair<ci, ci> range = d.agraph.certs.equal_range(d.node);
   for(ci i = range.first; i != range.second; ++i)
     {
       if (i->first == d.node)
@@ -651,8 +652,8 @@ anc_graph::construct_revisions_from_ance
   // need to worry about one side of the frontier advancing faster than
   // another.
 
-  typedef multimap<u64,u64>::const_iterator ci;
-  multimap<u64,u64> parent_to_child_map;
+  typedef multimap<u64, u64>::const_iterator ci;
+  multimap<u64, u64> parent_to_child_map;
   deque<u64> work;
   set<u64> done;
 
@@ -666,7 +667,7 @@ anc_graph::construct_revisions_from_ance
         parent_to_child_map.insert(make_pair(i->second, i->first));
         children.insert(i->first);
       }
-    for (map<u64,manifest_id>::const_iterator i = node_to_old_man.begin();
+    for (map<u64, manifest_id>::const_iterator i = node_to_old_man.begin();
          i != node_to_old_man.end(); ++i)
       {
         all.insert(i->first);
@@ -690,22 +691,22 @@ anc_graph::construct_revisions_from_ance
       if (done.find(child) != done.end())
         continue;
 
-      pair<ci,ci> parent_range = ancestry.equal_range(child);
+      pair<ci, ci> parent_range = ancestry.equal_range(child);
       set<u64> parents;
       bool parents_all_done = true;
       for (ci i = parent_range.first; parents_all_done && i != parent_range.second; ++i)
-      {
-        if (i->first != child)
-          continue;
-        u64 parent = i->second;
-        if (done.find(parent) == done.end())
-          {
-            work.push_back(child);
-            parents_all_done = false;
-          }
-        else
-          parents.insert(parent);
-      }
+        {
+          if (i->first != child)
+            continue;
+          u64 parent = i->second;
+          if (done.find(parent) == done.end())
+            {
+              work.push_back(child);
+              parents_all_done = false;
+            }
+          else
+            parents.insert(parent);
+        }
 
       if (parents_all_done
           && (node_to_new_rev.find(child) == node_to_new_rev.end()))
@@ -779,9 +780,9 @@ anc_graph::construct_revisions_from_ance
                     if (child_roster.has_node(j->first))
                       {
                         map<string, string> const &
-                          fattrs = j->second;
+                        fattrs = j->second;
                         for (map<string, string>::const_iterator
-                               k = fattrs.begin();
+                             k = fattrs.begin();
                              k != fattrs.end(); ++k)
                           {
                             string key = k->first;
@@ -884,7 +885,7 @@ anc_graph::construct_revisions_from_ance
           safe_insert(done, child);
 
           // Extend the work queue with all the children of this child
-          pair<ci,ci> grandchild_range = parent_to_child_map.equal_range(child);
+          pair<ci, ci> grandchild_range = parent_to_child_map.equal_range(child);
           for (ci i = grandchild_range.first;
                i != grandchild_range.second; ++i)
             {
============================================================
--- src/ancestry.cc	8b3388b690a5f4878bd29d752c3e6e073411739e
+++ src/ancestry.cc	bef401a1c928d3ddad8ee9277e68da204a7767ad
@@ -179,7 +179,7 @@ calculate_ancestors_from_graph(interner<
       ctx us = stk.top();
       revision_id rev(intern.lookup(us), origin::internal);
 
-      pair<gi,gi> parents = graph.equal_range(rev);
+      pair<gi, gi> parents = graph.equal_range(rev);
       bool pushed = false;
 
       // first make sure all parents are done
@@ -305,9 +305,9 @@ erase_ancestors_and_failures(database & 
   if (inverse_graph_cache_ptr == NULL)
     inverse_graph_cache_ptr = &inverse_graph;
   if (inverse_graph_cache_ptr->empty())
-  {
-    db.get_reverse_ancestry(*inverse_graph_cache_ptr);
-  }
+    {
+      db.get_reverse_ancestry(*inverse_graph_cache_ptr);
+    }
 
   // Keep a set of all ancestors that we've traversed -- to avoid
   // combinatorial explosion.
@@ -420,14 +420,14 @@ ancestry_difference(database & db, revis
   *au -= *u;
 
   for (unsigned int i = 0; i != au->size(); ++i)
-  {
-    if (au->test(i))
-      {
-        revision_id rid(intern.lookup(i), origin::internal);
-        if (!null_id(rid))
-          new_stuff.insert(rid);
-      }
-  }
+    {
+      if (au->test(i))
+        {
+          revision_id rid(intern.lookup(i), origin::internal);
+          if (!null_id(rid))
+            new_stuff.insert(rid);
+        }
+    }
 }
 
 void
@@ -450,13 +450,14 @@ select_nodes_modified_by_rev(database & 
                                     edge_nodes_modified);
 
       copy(edge_nodes_modified.begin(), edge_nodes_modified.end(),
-                inserter(nodes_modified, nodes_modified.begin()));
+           inserter(nodes_modified, nodes_modified.begin()));
     }
 }
 
 // These functions create new ancestry!
 
-namespace {
+namespace
+{
   struct true_node_id_source
     : public node_id_source
   {
@@ -553,14 +554,14 @@ graph_loader::load_parents(revision_id c
 
 void
 graph_loader::load_parents(revision_id const rid,
-                          set<revision_id> & parents)
+                           set<revision_id> & parents)
 {
   db.get_revision_parents(rid, parents);
 }
 
 void
 graph_loader::load_children(revision_id const rid,
-                           set<revision_id> & children)
+                            set<revision_id> & children)
 {
   db.get_revision_children(rid, children);
 }
@@ -579,7 +580,7 @@ graph_loader::load_revs(load_direction c
 
 void
 graph_loader::load_revs(load_direction const direction,
-                       set<revision_id> & revs)
+                        set<revision_id> & revs)
 {
   std::deque<revision_id> next(revs.begin(), revs.end());
 
============================================================
--- src/merge_3way.cc	f415f5ddbcfafbc8c822b2e3a848f48f968287af
+++ src/merge_3way.cc	1ff26ba435cf766c0f450ae3be61df5f3815aba9
@@ -71,11 +71,11 @@ static const char etab[3][10] =
 
 typedef enum { preserved = 0, deleted = 1, changed = 2 } edit_t;
 static const char etab[3][10] =
-  {
-    "preserved",
-    "deleted",
-    "changed"
-  };
+{
+  "preserved",
+  "deleted",
+  "changed"
+};
 
 struct extent
 {
@@ -169,97 +169,97 @@ void normalize_extents(vector<extent> & 
   for (size_t i = 0; i < a_b_map.size(); ++i)
     {
       if (i > 0)
-      {
-        size_t j = i;
-        while (j > 0
-               && (a_b_map.at(j-1).type == preserved)
-               && (a_b_map.at(j).type == changed)
-               && (a.at(j) == b.at(a_b_map.at(j).pos + a_b_map.at(j).len - 1)))
-          {
-            // This is implied by (a_b_map.at(j-1).type == preserved)
-            I(a.at(j-1) == b.at(a_b_map.at(j-1).pos));
+        {
+          size_t j = i;
+          while (j > 0
+                 && (a_b_map.at(j - 1).type == preserved)
+                 && (a_b_map.at(j).type == changed)
+                 && (a.at(j) == b.at(a_b_map.at(j).pos + a_b_map.at(j).len - 1)))
+            {
+              // This is implied by (a_b_map.at(j-1).type == preserved)
+              I(a.at(j - 1) == b.at(a_b_map.at(j - 1).pos));
 
-            // Coming into loop we have:
-            //                     i
-            //  z   --pres-->  z   0
-            //  o   --pres-->  o   1
-            //  a   --chng-->  a   2   The important thing here is that 'a' in
-            //                 t       the LHS matches with ...
-            //                 u
-            //                 v
-            //                 a       ... the a on the RHS here. Hence we can
-            //  q  --pres-->   q   3   'shift' the entire 'changed' block
-            //  e  --chng-->   d   4   upwards, leaving a 'preserved' line
-            //  g  --pres-->   g   5   'a'->'a'
-            //
-            //  Want to end up with:
-            //                     i
-            //  z   --pres-->  z   0
-            //  o   --chng-->  o   1
-            //                 a
-            //                 t
-            //                 u
-            //                 v
-            //  a  --pres-->   a   2
-            //  q  --pres-->   q   3
-            //  e  --chng-->   d   4
-            //  g  --pres-->   g   5
-            //
-            // Now all the 'changed' extents are normalised to the
-            // earliest possible position.
+              // Coming into loop we have:
+              //                     i
+              //  z   --pres-->  z   0
+              //  o   --pres-->  o   1
+              //  a   --chng-->  a   2   The important thing here is that 'a' in
+              //                 t       the LHS matches with ...
+              //                 u
+              //                 v
+              //                 a       ... the a on the RHS here. Hence we can
+              //  q  --pres-->   q   3   'shift' the entire 'changed' block
+              //  e  --chng-->   d   4   upwards, leaving a 'preserved' line
+              //  g  --pres-->   g   5   'a'->'a'
+              //
+              //  Want to end up with:
+              //                     i
+              //  z   --pres-->  z   0
+              //  o   --chng-->  o   1
+              //                 a
+              //                 t
+              //                 u
+              //                 v
+              //  a  --pres-->   a   2
+              //  q  --pres-->   q   3
+              //  e  --chng-->   d   4
+              //  g  --pres-->   g   5
+              //
+              // Now all the 'changed' extents are normalised to the
+              // earliest possible position.
 
-            L(FL("exchanging preserved extent [%d+%d] with changed extent [%d+%d]")
-              % a_b_map.at(j-1).pos
-              % a_b_map.at(j-1).len
-              % a_b_map.at(j).pos
-              % a_b_map.at(j).len);
+              L(FL("exchanging preserved extent [%d+%d] with changed extent [%d+%d]")
+                % a_b_map.at(j - 1).pos
+                % a_b_map.at(j - 1).len
+                % a_b_map.at(j).pos
+                % a_b_map.at(j).len);
 
-            swap(a_b_map.at(j-1).len, a_b_map.at(j).len);
-            swap(a_b_map.at(j-1).type, a_b_map.at(j).type);
+              swap(a_b_map.at(j - 1).len, a_b_map.at(j).len);
+              swap(a_b_map.at(j - 1).type, a_b_map.at(j).type);
 
-            // Adjust position of the later, preserved extent. It should
-            // better point to the second 'a' in the above example.
-            a_b_map.at(j).pos = a_b_map.at(j-1).pos + a_b_map.at(j-1).len;
+              // Adjust position of the later, preserved extent. It should
+              // better point to the second 'a' in the above example.
+              a_b_map.at(j).pos = a_b_map.at(j - 1).pos + a_b_map.at(j - 1).len;
 
-            --j;
-          }
-      }
+              --j;
+            }
+        }
     }
 
   for (size_t i = 0; i < a_b_map.size(); ++i)
     {
       if (i > 0)
-      {
-        size_t j = i;
-        while (j > 0
-               && a_b_map.at(j).type == changed
-               && a_b_map.at(j-1).type == changed
-               && a_b_map.at(j).len > 1
-               && a_b_map.at(j-1).pos + a_b_map.at(j-1).len == a_b_map.at(j).pos)
-          {
-            // step 1: move a chunk from this insert extent to its
-            // predecessor
-            size_t piece = a_b_map.at(j).len - 1;
-            //      L(FL("moving change piece of len %d from pos %d to pos %d")
-            //        % piece
-            //        % a_b_map.at(j).pos
-            //        % a_b_map.at(j-1).pos);
-            a_b_map.at(j).len = 1;
-            a_b_map.at(j).pos += piece;
-            a_b_map.at(j-1).len += piece;
+        {
+          size_t j = i;
+          while (j > 0
+                 && a_b_map.at(j).type == changed
+                 && a_b_map.at(j - 1).type == changed
+                 && a_b_map.at(j).len > 1
+                 && a_b_map.at(j - 1).pos + a_b_map.at(j - 1).len == a_b_map.at(j).pos)
+            {
+              // step 1: move a chunk from this insert extent to its
+              // predecessor
+              size_t piece = a_b_map.at(j).len - 1;
+              //      L(FL("moving change piece of len %d from pos %d to pos %d")
+              //        % piece
+              //        % a_b_map.at(j).pos
+              //        % a_b_map.at(j-1).pos);
+              a_b_map.at(j).len = 1;
+              a_b_map.at(j).pos += piece;
+              a_b_map.at(j - 1).len += piece;
 
-            // step 2: if this extent (now of length 1) has become a "changed"
-            // extent identical to its previous state, switch it to a "preserved"
-            // extent.
-            if (b.at(a_b_map.at(j).pos) == a.at(j))
-              {
-                //              L(FL("changing normalized 'changed' extent at %d to 'preserved'")
-                //                % a_b_map.at(j).pos);
-                a_b_map.at(j).type = preserved;
-              }
-            --j;
-          }
-      }
+              // step 2: if this extent (now of length 1) has become a "changed"
+              // extent identical to its previous state, switch it to a "preserved"
+              // extent.
+              if (b.at(a_b_map.at(j).pos) == a.at(j))
+                {
+                  //              L(FL("changing normalized 'changed' extent at %d to 'preserved'")
+                  //                % a_b_map.at(j).pos);
+                  a_b_map.at(j).type = preserved;
+                }
+              --j;
+            }
+        }
     }
 }
 
@@ -449,7 +449,7 @@ bool merge3(vector<string> const & ances
             vector<string> & merged)
 {
   try
-   {
+    {
       merge_via_edit_scripts(ancestor, left, right, merged);
     }
   catch(conflict &)
============================================================
--- src/merge_conflict.cc	1c09dc4a20532ce040c429d7fb5ed2a25a199856
+++ src/merge_conflict.cc	ea11613afa8d7315cc63e82d5c30ed748f8586a1
@@ -87,7 +87,7 @@ namespace resolve_conflicts
   new_file_path(string path)
   {
     return shared_ptr<any_path>
-      (new file_path(file_path_external(utf8(path, origin::user))));
+           (new file_path(file_path_external(utf8(path, origin::user))));
   };
 }
 
@@ -881,11 +881,11 @@ roster_merge_result::report_orphaned_nod
       basic_io::stanza st;
 
       if (type == file_type)
-          if (basic_io)
-            st.push_str_pair(syms::conflict, syms::orphaned_file);
-          else
-            P(F("conflict: orphaned file '%s' from revision %s")
-              % lca_name % lca_rid);
+        if (basic_io)
+          st.push_str_pair(syms::conflict, syms::orphaned_file);
+        else
+          P(F("conflict: orphaned file '%s' from revision %s")
+            % lca_name % lca_rid);
       else
         {
           if (basic_io)
@@ -915,13 +915,12 @@ roster_merge_result::report_orphaned_nod
             {
               if (basic_io)
                 put_rename_conflict_left (st, adaptor, conflict.nid);
+              else if (type == file_type)
+                P(F("file '%s' was renamed from '%s' on the left")
+                  % orphan_name % lca_name);
               else
-                if (type == file_type)
-                  P(F("file '%s' was renamed from '%s' on the left")
-                    % orphan_name % lca_name);
-                else
-                  P(F("directory '%s' was renamed from '%s' on the left")
-                    % orphan_name % lca_name);
+                P(F("directory '%s' was renamed from '%s' on the left")
+                  % orphan_name % lca_name);
             }
           else
             {
@@ -958,25 +957,23 @@ roster_merge_result::report_orphaned_nod
             {
               if (basic_io)
                 put_rename_conflict_right (st, adaptor, conflict.nid);
+              else if (type == file_type)
+                P(F("file '%s' was renamed from '%s' on the right")
+                  % orphan_name % lca_name);
               else
-                if (type == file_type)
-                  P(F("file '%s' was renamed from '%s' on the right")
-                    % orphan_name % lca_name);
-                else
-                  P(F("directory '%s' was renamed from '%s' on the right")
-                    % orphan_name % lca_name);
+                P(F("directory '%s' was renamed from '%s' on the right")
+                  % orphan_name % lca_name);
             }
           else
             {
               if (basic_io)
                 put_added_conflict_right (st, adaptor, conflict.nid);
+              else if (type == file_type)
+                P(F("file '%s' was added on the right")
+                  % orphan_name);
               else
-                if (type == file_type)
-                  P(F("file '%s' was added on the right")
-                    % orphan_name);
-                else
-                  P(F("directory '%s' was added on the right")
-                    % orphan_name);
+                P(F("directory '%s' was added on the right")
+                  % orphan_name);
             }
         }
       else
@@ -1138,7 +1135,7 @@ roster_merge_result::report_duplicate_na
               else
                 P(F("added as a new directory on the right"));
             }
-         }
+        }
       else if (!left_lca_roster->has_node(right_nid) &&
                right_lca_roster->has_node(left_nid))
         {
@@ -1147,11 +1144,10 @@ roster_merge_result::report_duplicate_na
 
           if (basic_io)
             put_rename_conflict_left (st, adaptor, left_nid);
+          else if (left_type == file_type)
+            P(F("renamed from file '%s' on the left") % left_lca_name);
           else
-            if (left_type == file_type)
-              P(F("renamed from file '%s' on the left") % left_lca_name);
-            else
-              P(F("renamed from directory '%s' on the left") % left_lca_name);
+            P(F("renamed from directory '%s' on the left") % left_lca_name);
 
           if (basic_io)
             put_added_conflict_right (st, adaptor, right_nid);
@@ -1660,9 +1656,9 @@ read_orphaned_node_conflicts(basic_io::p
 
 static void
 read_orphaned_node_conflicts(basic_io::parser & pars,
-                            std::vector<orphaned_node_conflict> & conflicts,
-                            roster_t const & left_roster,
-                            roster_t const & right_roster)
+                             std::vector<orphaned_node_conflict> & conflicts,
+                             roster_t const & left_roster,
+                             roster_t const & right_roster)
 {
   while (pars.tok.in.lookahead != EOF && (pars.symp(syms::orphaned_directory) || pars.symp(syms::orphaned_file)))
     {
@@ -1867,7 +1863,7 @@ validate_duplicate_name_conflicts(basic_
         {
           std::vector<duplicate_name_conflict>::iterator tmp = i;
           E(++tmp == conflicts.end(), origin::user,
-             F(conflicts_mismatch_msg));
+            F(conflicts_mismatch_msg));
         }
     }
 } // validate_duplicate_name_conflicts
@@ -2298,9 +2294,9 @@ roster_merge_result::resolve_orphaned_no
 
 void
 roster_merge_result::resolve_orphaned_node_conflicts(lua_hooks & lua,
-                                                      roster_t const & left_roster,
-                                                      roster_t const & right_roster,
-                                                      content_merge_adaptor & adaptor)
+                                                     roster_t const & left_roster,
+                                                     roster_t const & right_roster,
+                                                     content_merge_adaptor & adaptor)
 {
   MM(left_roster);
   MM(right_roster);
@@ -2344,7 +2340,7 @@ roster_merge_result::resolve_orphaned_no
         case resolve_conflicts::rename:
           P(F("renaming '%s' to '%s'") % name % *conflict.resolution.second);
           attach_node
-            (lua, roster, conflict.nid, file_path_internal (conflict.resolution.second->as_internal()));
+          (lua, roster, conflict.nid, file_path_internal (conflict.resolution.second->as_internal()));
           break;
 
         case resolve_conflicts::none:
@@ -2374,32 +2370,32 @@ resolve_duplicate_name_one_side(lua_hook
   switch (resolution.first)
     {
     case resolve_conflicts::content_user:
-      {
-        E(other_resolution.first == resolve_conflicts::drop ||
-          other_resolution.first == resolve_conflicts::rename,
-          origin::user,
-          F("inconsistent left/right resolutions for '%s'") % name);
+    {
+      E(other_resolution.first == resolve_conflicts::drop ||
+        other_resolution.first == resolve_conflicts::rename,
+        origin::user,
+        F("inconsistent left/right resolutions for '%s'") % name);
 
-        P(F("replacing content of '%s' with '%s'") % name % resolution.second->as_external());
+      P(F("replacing content of '%s' with '%s'") % name % resolution.second->as_external());
 
-        file_id result_fid;
-        file_data parent_data, result_data;
-        data result_raw_data;
-        adaptor.get_version(fid, parent_data);
+      file_id result_fid;
+      file_data parent_data, result_data;
+      data result_raw_data;
+      adaptor.get_version(fid, parent_data);
 
-        read_data(*resolution.second, result_raw_data);
+      read_data(*resolution.second, result_raw_data);
 
-        result_data = file_data(result_raw_data);
-        calculate_ident(result_data, result_fid);
+      result_data = file_data(result_raw_data);
+      calculate_ident(result_data, result_fid);
 
-        file_t result_node = downcast_to_file_t(result_roster.get_node_for_update(nid));
-        result_node->content = result_fid;
+      file_t result_node = downcast_to_file_t(result_roster.get_node_for_update(nid));
+      result_node->content = result_fid;
 
-        adaptor.record_file(fid, result_fid, parent_data, result_data);
+      adaptor.record_file(fid, result_fid, parent_data, result_data);
 
-        attach_node(lua, result_roster, nid, name);
-      }
-      break;
+      attach_node(lua, result_roster, nid, name);
+    }
+    break;
 
     case resolve_conflicts::drop:
       P(F("dropping '%s'") % name);
@@ -2425,7 +2421,7 @@ resolve_duplicate_name_one_side(lua_hook
     case resolve_conflicts::rename:
       P(F("renaming '%s' to '%s'") % name % *resolution.second);
       attach_node
-        (lua, result_roster, nid, file_path_internal (resolution.second->as_internal()));
+      (lua, result_roster, nid, file_path_internal (resolution.second->as_internal()));
       break;
 
     case resolve_conflicts::none:
@@ -2465,7 +2461,7 @@ roster_merge_result::resolve_duplicate_n
       MM(conflict);
 
       node_id left_nid = conflict.left_nid;
-      node_id right_nid= conflict.right_nid;
+      node_id right_nid = conflict.right_nid;
 
       file_path left_name, right_name;
       file_id left_fid, right_fid;
@@ -2489,10 +2485,10 @@ roster_merge_result::resolve_duplicate_n
         }
 
       resolve_duplicate_name_one_side
-        (lua, conflict.left_resolution, conflict.right_resolution, left_name, left_fid, left_nid, adaptor, roster);
+      (lua, conflict.left_resolution, conflict.right_resolution, left_name, left_fid, left_nid, adaptor, roster);
 
       resolve_duplicate_name_one_side
-        (lua, conflict.right_resolution, conflict.left_resolution, right_name, right_fid, right_nid, adaptor, roster);
+      (lua, conflict.right_resolution, conflict.left_resolution, right_name, right_fid, right_nid, adaptor, roster);
     } // end for
 
   duplicate_name_conflicts.clear();
@@ -2527,47 +2523,47 @@ roster_merge_result::resolve_file_conten
 
       switch (conflict.resolution.first)
         {
-          case resolve_conflicts::content_internal:
-          case resolve_conflicts::none:
-            {
-              file_id merged_id;
+        case resolve_conflicts::content_internal:
+        case resolve_conflicts::none:
+        {
+          file_id merged_id;
 
-              E(resolve_conflicts::do_auto_merge(lua, conflict, adaptor, left_roster,
-                                                 right_roster, this->roster, merged_id),
-                origin::user,
-                F("merge of '%s', '%s' failed") % left_name % right_name);
+          E(resolve_conflicts::do_auto_merge(lua, conflict, adaptor, left_roster,
+                                             right_roster, this->roster, merged_id),
+            origin::user,
+            F("merge of '%s', '%s' failed") % left_name % right_name);
 
-              P(F("merged '%s', '%s'") % left_name % right_name);
+          P(F("merged '%s', '%s'") % left_name % right_name);
 
-              file_t result_node = downcast_to_file_t(roster.get_node_for_update(conflict.nid));
-              result_node->content = merged_id;
-            }
-            break;
+          file_t result_node = downcast_to_file_t(roster.get_node_for_update(conflict.nid));
+          result_node->content = merged_id;
+        }
+        break;
 
-          case resolve_conflicts::content_user:
-            {
-              P(F("replacing content of '%s', '%s' with '%s'") %
-                left_name % right_name % conflict.resolution.second->as_external());
+        case resolve_conflicts::content_user:
+        {
+          P(F("replacing content of '%s', '%s' with '%s'") %
+            left_name % right_name % conflict.resolution.second->as_external());
 
-              file_id result_id;
-              file_data left_data, right_data, result_data;
-              data result_raw_data;
-              adaptor.get_version(conflict.left, left_data);
-              adaptor.get_version(conflict.right, right_data);
+          file_id result_id;
+          file_data left_data, right_data, result_data;
+          data result_raw_data;
+          adaptor.get_version(conflict.left, left_data);
+          adaptor.get_version(conflict.right, right_data);
 
-              read_data(*conflict.resolution.second, result_raw_data);
+          read_data(*conflict.resolution.second, result_raw_data);
 
-              result_data = file_data(result_raw_data);
-              calculate_ident(result_data, result_id);
+          result_data = file_data(result_raw_data);
+          calculate_ident(result_data, result_id);
 
-              file_t result_node = downcast_to_file_t(roster.get_node_for_update(conflict.nid));
-              result_node->content = result_id;
+          file_t result_node = downcast_to_file_t(roster.get_node_for_update(conflict.nid));
+          result_node->content = result_id;
 
-              adaptor.record_merge(conflict.left, conflict.right, result_id,
-                                   left_data, right_data, result_data);
+          adaptor.record_merge(conflict.left, conflict.right, result_id,
+                               left_data, right_data, result_data);
 
-            }
-            break;
+        }
+        break;
 
         default:
           I(false);
============================================================
--- src/git_export.cc	05a325fef7bd56f529e55db02fd2542acd5d06f7
+++ src/git_export.cc	fc30050129f802453cbcb42f128bccec194862cb
@@ -70,10 +70,10 @@ read_mappings(system_path const & path, 
     {
       string line = trim(*i);
       size_t index = line.find('=');
-      if (index != string::npos || index < line.length()-1)
+      if (index != string::npos || index < line.length() - 1)
         {
           string key = trim(line.substr(0, index));
-          string value = trim(line.substr(index+1));
+          string value = trim(line.substr(index + 1));
           mappings[key] = value;
         }
       else if (!line.empty())
@@ -85,7 +85,7 @@ validate_author_mappings(lua_hooks & lua
 validate_author_mappings(lua_hooks & lua,
                          map<string, string> const & authors)
 {
-  for (map<string, string>::const_iterator i = authors.begin(); 
+  for (map<string, string>::const_iterator i = authors.begin();
        i != authors.end(); ++i)
     {
       E(lua.hook_validate_git_author(i->second), origin::user,
@@ -124,7 +124,7 @@ import_marks(system_path const & marks_f
       E(c == '\n', origin::user, F("incomplete line in marks file"));
 
       marked_revs[revid] = mark;
-      if (mark > mark_id) mark_id = mark+1;
+      if (mark > mark_id) mark_id = mark + 1;
       marks.peek();
     }
 }
@@ -136,7 +136,7 @@ export_marks(system_path const & marks_f
 {
   ostringstream marks;
   for (map<revision_id, size_t>::const_iterator
-         i = marked_revs.begin(); i != marked_revs.end(); ++i)
+       i = marked_revs.begin(); i != marked_revs.end(); ++i)
     marks << ":" << i->second << " " << i->first << "\n";
 
   data mark_data(marks.str(), origin::internal);
@@ -168,7 +168,7 @@ load_changes(database & db,
   loaded.set_total(revisions.size());
 
   for (vector<revision_id>::const_reverse_iterator
-         r = revisions.rbegin(); r != revisions.rend(); ++r)
+       r = revisions.rbegin(); r != revisions.rend(); ++r)
     {
       revision_t revision;
       db.get_revision(*r, revision);
@@ -227,7 +227,7 @@ export_changes(database & db, lua_hooks 
   map<string, string> valid_authors(author_map);
 
   for (vector<revision_id>::const_iterator
-         r = revisions.begin(); r != revisions.end(); ++r)
+       r = revisions.begin(); r != revisions.end(); ++r)
     {
       revnum++;
 
@@ -293,7 +293,7 @@ export_changes(database & db, lua_hooks 
       //
       // all keys that have signed author certs:
       //
-      // 'select distinct public_keys.name 
+      // 'select distinct public_keys.name
       //  from public_keys
       //  left join revision_certs on revision_certs.keypair_id = public_keys.id
       //  where revision_certs.name = "author"'
@@ -405,7 +405,7 @@ export_changes(database & db, lua_hooks 
       // emit file data blobs for modified and added files
 
       for (add_iterator
-             i = change.additions.begin(); i != change.additions.end(); ++i)
+           i = change.additions.begin(); i != change.additions.end(); ++i)
         {
           if (marked_files.find(i->content) == marked_files.end())
             {
@@ -443,7 +443,7 @@ export_changes(database & db, lua_hooks 
             message << "Monotone-Date: " << date->value() << "\n";
 
           for (cert_iterator
-                 branch = branches.begin() ; branch != branches.end(); ++branch)
+               branch = branches.begin() ; branch != branches.end(); ++branch)
             message << "Monotone-Branch: " << branch->value() << "\n";
 
           for (cert_iterator tag = tags.begin(); tag != tags.end(); ++tag)
@@ -469,17 +469,17 @@ export_changes(database & db, lua_hooks 
         cout << "merge :" << marked_revs[parent2] << "\n";
 
       for (delete_iterator
-             i = change.deletions.begin(); i != change.deletions.end(); ++i)
+           i = change.deletions.begin(); i != change.deletions.end(); ++i)
         cout << "D " << quote_path(*i) << "\n";
 
       for (rename_iterator
-             i = reordered_renames.begin(); i != reordered_renames.end(); ++i)
+           i = reordered_renames.begin(); i != reordered_renames.end(); ++i)
         cout << "R "
              << quote_path(i->first) << " "
              << quote_path(i->second) << "\n";
 
       for (add_iterator
-             i = change.additions.begin(); i != change.additions.end(); ++i)
+           i = change.additions.begin(); i != change.additions.end(); ++i)
         cout << "M " << i->mode << " :"
              << marked_files[i->content] << " "
              << quote_path(i->path) << "\n";
@@ -522,20 +522,20 @@ export_rev_refs(vector<revision_id> cons
                 map<revision_id, size_t> & marked_revs)
 {
   for (vector<revision_id>::const_iterator
-         i = revisions.begin(); i != revisions.end(); ++i)
+       i = revisions.begin(); i != revisions.end(); ++i)
     cout << "reset refs/mtn/revs/" << *i << "\n"
          << "from :" << marked_revs[*i] << "\n";
 }
 
 void
 export_root_refs(database & db,
-                map<revision_id, size_t> & marked_revs)
+                 map<revision_id, size_t> & marked_revs)
 {
   set<revision_id> roots;
   revision_id nullid;
   db.get_revision_children(nullid, roots);
   for (set<revision_id>::const_iterator
-         i = roots.begin(); i != roots.end(); ++i)
+       i = roots.begin(); i != roots.end(); ++i)
     cout << "reset refs/mtn/roots/" << *i << "\n"
          << "from :" << marked_revs[*i] << "\n";
 }
@@ -547,7 +547,7 @@ export_leaf_refs(database & db,
   set<revision_id> leaves;
   db.get_leaves(leaves);
   for (set<revision_id>::const_iterator
-         i = leaves.begin(); i != leaves.end(); ++i)
+       i = leaves.begin(); i != leaves.end(); ++i)
     cout << "reset refs/mtn/leaves/" << *i << "\n"
          << "from :" << marked_revs[*i] << "\n";
 }
============================================================
--- src/git_export.hh	6888ff1cc0d73bf0b30ba975d6c4050bb345d9de
+++ src/git_export.hh	0992d00e68c6e6bce6b4ee5301ba029a220bd5c2
@@ -14,8 +14,8 @@ void validate_author_mappings(lua_hooks 
                    std::map<std::string, std::string> & mappings);
 
 void validate_author_mappings(lua_hooks & lua,
-                              std::map<std::string,
-                                       std::string> const & authors);
+                              std::map < std::string,
+                              std::string > const & authors);
 
 void import_marks(system_path const & marks_file,
                   std::map<revision_id, size_t> & marked_revs);
@@ -39,10 +39,10 @@ void export_root_refs(database & db,
                      std::map<revision_id, size_t> & marked_revs);
 
 void export_root_refs(database & db,
-                     std::map<revision_id, size_t> & marked_revs);
+                      std::map<revision_id, size_t> & marked_revs);
 
 void export_leaf_refs(database & db,
-                     std::map<revision_id, size_t> & marked_revs);
+                      std::map<revision_id, size_t> & marked_revs);
 
 #endif // __GIT_EXPORT_HH__
 
============================================================
--- src/git_change.hh	f2447c0cbf0311d85622f0aada664bce13ad1f10
+++ src/git_change.hh	a135101e3bed12f359ad648392a7cc74da2bf7d2
@@ -20,11 +20,11 @@ struct git_add
 
 struct git_add
 {
-    file_path path;
-    file_id content;
-    std::string mode;
-    git_add(file_path path, file_id content, std::string mode) :
-        path(path), content(content), mode(mode) {}
+  file_path path;
+  file_id content;
+  std::string mode;
+  git_add(file_path path, file_id content, std::string mode) :
+    path(path), content(content), mode(mode) {}
 };
 
 typedef std::vector<git_delete>::const_iterator delete_iterator;
@@ -33,9 +33,9 @@ struct git_change
 
 struct git_change
 {
-    std::vector<git_delete> deletions;
-    std::vector<git_rename> renames;
-    std::vector<git_add> additions;
+  std::vector<git_delete> deletions;
+  std::vector<git_rename> renames;
+  std::vector<git_add> additions;
 };
 
 void get_change(roster_t const & left, roster_t const & right,
============================================================
--- src/automate_reader.cc	ac60e48d6902a3cef96bc86a93640936c125beee
+++ src/automate_reader.cc	f0c56bbb217e83dc2599f4c1109341ab1940f90b
@@ -39,16 +39,16 @@ bool automate_reader::get_string(string 
     }
   while(c <= '9' && c >= '0')
     {
-      size = (size*10)+(c-'0');
+      size = (size * 10) + (c - '0');
       read(&c, 1);
     }
   E(c == ':', origin::user,
     F("bad input to automate stdio: expected ':' after string size"));
-  char *str = new char[size];
+  char * str = new char[size];
   size_t got = 0;
   while(got < size)
     {
-      int n = read(str+got, size-got);
+      int n = read(str + got, size - got);
       got += n;
     }
   out = string(str, size);
@@ -56,7 +56,7 @@ bool automate_reader::get_string(string 
   L(FL("Got string '%s'") % out);
   return true;
 }
-streamsize automate_reader::read(char *buf, size_t nbytes, bool eof_ok)
+streamsize automate_reader::read(char * buf, size_t nbytes, bool eof_ok)
 {
   streamsize rv;
 
============================================================
--- src/automate_reader.hh	284ff60073d0e4861ab516408fa82a8f7a295b04
+++ src/automate_reader.hh	8453dbdba80b72a8862c01dee3f0de6209d99f89
@@ -20,7 +20,7 @@ class automate_reader
   enum location {opt, cmd, none, eof};
   location loc;
   bool get_string(std::string & out);
-  std::streamsize read(char *buf, size_t nbytes, bool eof_ok = false);
+  std::streamsize read(char * buf, size_t nbytes, bool eof_ok = false);
   void go_to_next_item();
 public:
   automate_reader(std::istream & is);
============================================================
--- src/automate_ostream_demuxed.hh	cf23c94fe10bf23a627f1bc8b6ecda2c31a179b0
+++ src/automate_ostream_demuxed.hh	1db92d43327030688b5aba626333261d82e77ffc
@@ -16,7 +16,7 @@
 #include "automate_ostream.hh"
 #include "simplestring_xform.hh"
 
-template<typename _CharT, typename _Traits = std::char_traits<_CharT> >
+template < typename _CharT, typename _Traits = std::char_traits<_CharT> >
 class basic_automate_streambuf_demuxed : public std::basic_streambuf<_CharT, _Traits>
 {
   typedef _Traits traits_type;
@@ -71,11 +71,11 @@ public:
     (*errout) << out << std::endl;
   }
 
-  void write_headers(std::vector<std::pair<std::string,std::string> > const & headers)
+  void write_headers(std::vector<std::pair<std::string, std::string> > const & headers)
   {
     i18n_format prefix = F("%s: remote header: ") % prog_name;
     for (std::vector<std::pair<std::string, std::string> >::const_iterator h = headers.begin();
-       h != headers.end(); ++h)
+         h != headers.end(); ++h)
       {
         (*errout) << prefix.str() << h->first << ": " << h->second << std::endl;
       }
@@ -105,7 +105,7 @@ private:
   }
 };
 
-template<typename _CharT, typename _Traits = std::char_traits<_CharT> >
+template < typename _CharT, typename _Traits = std::char_traits<_CharT> >
 struct basic_automate_ostream_demuxed : public basic_automate_ostream<_CharT, _Traits>
 {
   typedef basic_automate_streambuf_demuxed<_CharT, _Traits> streambuf_type;
@@ -138,7 +138,7 @@ public:
   virtual void write_out_of_band(char type, std::string const & data)
   { _M_autobuf.write_out_of_band(type, data); }
 
-  virtual void write_headers(std::vector<std::pair<std::string,std::string> > const & headers)
+  virtual void write_headers(std::vector<std::pair<std::string, std::string> > const & headers)
   { _M_autobuf.write_headers(headers); }
 };
 
============================================================
--- src/rev_output.cc	a2c70b893b31296917d1a2b974faa1da46c13f1e
+++ src/rev_output.cc	8dd2d5d36450fae54e392ff5b3aea83d7deff36c
@@ -35,16 +35,16 @@ revision_header(revision_id const rid, r
 {
   vector<cert> certs;
   key_id empty_key;
-  certs.push_back(cert(rid, author_cert_name, 
+  certs.push_back(cert(rid, author_cert_name,
                        cert_value(author, origin::user), empty_key));
-  certs.push_back(cert(rid, date_cert_name, 
+  certs.push_back(cert(rid, date_cert_name,
                        cert_value(date.as_iso_8601_extended(), origin::user),
                        empty_key));
-  certs.push_back(cert(rid, branch_cert_name, 
+  certs.push_back(cert(rid, branch_cert_name,
                        cert_value(branch(), origin::user), empty_key));
 
   if (!changelog().empty())
-    certs.push_back(cert(rid, changelog_cert_name, 
+    certs.push_back(cert(rid, changelog_cert_name,
                          cert_value(changelog(), origin::user), empty_key));
 
   revision_header(rid, rev, certs, date_fmt, header);
@@ -147,29 +147,29 @@ revision_summary(revision_t const & rev,
         out << _("no changes") << '\n';
 
       for (set<file_path>::const_iterator i = cs.nodes_deleted.begin();
-            i != cs.nodes_deleted.end(); ++i)
-        out << (F("  dropped  %s") %*i) << '\n';
+           i != cs.nodes_deleted.end(); ++i)
+        out << (F("  dropped  %s") % *i) << '\n';
 
       for (map<file_path, file_path>::const_iterator
-            i = cs.nodes_renamed.begin();
-            i != cs.nodes_renamed.end(); ++i)
+           i = cs.nodes_renamed.begin();
+           i != cs.nodes_renamed.end(); ++i)
         out << (F("  renamed  %s\n"
                   "       to  %s") % i->first % i->second) << '\n';
 
       for (set<file_path>::const_iterator i = cs.dirs_added.begin();
-            i != cs.dirs_added.end(); ++i)
+           i != cs.dirs_added.end(); ++i)
         out << (F("  added    %s") % *i) << '\n';
 
       for (map<file_path, file_id>::const_iterator i = cs.files_added.begin();
-            i != cs.files_added.end(); ++i)
+           i != cs.files_added.end(); ++i)
         out << (F("  added    %s") % i->first) << '\n';
 
       for (map<file_path, pair<file_id, file_id> >::const_iterator
-              i = cs.deltas_applied.begin(); i != cs.deltas_applied.end(); ++i)
+           i = cs.deltas_applied.begin(); i != cs.deltas_applied.end(); ++i)
         out << (F("  patched  %s") % i->first) << '\n';
 
       for (map<pair<file_path, attr_key>, attr_value >::const_iterator
-             i = cs.attrs_set.begin(); i != cs.attrs_set.end(); ++i)
+           i = cs.attrs_set.begin(); i != cs.attrs_set.end(); ++i)
         out << (F("  attr on  %s\n"
                   "      set  %s\n"
                   "       to  %s")
@@ -179,10 +179,10 @@ revision_summary(revision_t const & rev,
       // the cset calls it attrs_cleared
       // the command is attr drop
       // here it is called unset
-      // the revision text uses attr clear 
+      // the revision text uses attr clear
 
       for (set<pair<file_path, attr_key> >::const_iterator
-             i = cs.attrs_cleared.begin(); i != cs.attrs_cleared.end(); ++i)
+           i = cs.attrs_cleared.begin(); i != cs.attrs_cleared.end(); ++i)
         out << (F("  attr on  %s\n"
                   "    unset  %s") % i->first % i->second) << '\n';
 
============================================================
--- src/rev_output.hh	666dd3ed35e16d8b122b4932c2aad05a21a22e25
+++ src/rev_output.hh	9f9e97b2e2168f99b6af7c7f941f02de9a621a29
@@ -17,13 +17,13 @@ void
 struct cert;
 
 void
-revision_header(revision_id const rid, revision_t const & rev, 
+revision_header(revision_id const rid, revision_t const & rev,
                 std::string const & author, date_t const date,
                 branch_name const & branch, utf8 const & changelog,
                 std::string const & date_fmt, utf8 & header);
 
 void
-revision_header(revision_id const rid, revision_t const & rev, 
+revision_header(revision_id const rid, revision_t const & rev,
                 std::vector<cert> const & certs, std::string const & date_fmt,
                 utf8 & header);
 
============================================================
--- src/cache_logger.cc	e5e7cc4fb068945e7a7a8cf0450dec6d31f160e3
+++ src/cache_logger.cc	98a87cb16e7d67366c3e9696abe2dacf94358af8
@@ -41,7 +41,7 @@ void cache_logger::log_exists(bool exist
 {
   if (_impl)
     {
-      _impl->stream << "Exists: " << (exists?"ok":"missing")
+      _impl->stream << "Exists: " << (exists ? "ok" : "missing")
                     << "; position: " << position
                     << "; count: " << item_count
                     << "; size: " << est_size << " of " << max_size
@@ -54,7 +54,7 @@ void cache_logger::log_touch(bool exists
 {
   if (_impl)
     {
-      _impl->stream << "Touch: " << (exists?"ok":"missing")
+      _impl->stream << "Touch: " << (exists ? "ok" : "missing")
                     << "; position: " << position
                     << "; count: " << item_count
                     << "; size: " << est_size << " of " << max_size
@@ -67,7 +67,7 @@ void cache_logger::log_fetch(bool exists
 {
   if (_impl)
     {
-      _impl->stream << "Fetch: " << (exists?"ok":"missing")
+      _impl->stream << "Fetch: " << (exists ? "ok" : "missing")
                     << "; position: " << position
                     << "; count: " << item_count
                     << "; size: " << est_size << " of " << max_size
@@ -76,7 +76,7 @@ void cache_logger::log_insert(int items_
 }
 
 void cache_logger::log_insert(int items_removed,
-                             int item_count, int est_size) const
+                              int item_count, int est_size) const
 {
   if (_impl)
     {
============================================================
--- src/cow_trie.hh	4531a829958b3a0f92abd30d4ef9bd35ef5e07a3
+++ src/cow_trie.hh	a92482fc7f6caddb8c59e0e139d577d55fa7ede4
@@ -24,7 +24,7 @@ private:
   //typedef _Value value_type;
   typedef std::pair<_Key, _Value> value_type;
 private:
-  enum { mask = (1<<_Bits)-1 };
+  enum { mask = (1 << _Bits) - 1 };
   enum { levels = (sizeof(_Key) * 8 + _Bits - 1) / _Bits };
 
   struct middle_node_type
@@ -40,47 +40,47 @@ private:
   unsigned _count;
   boost::shared_ptr<void> _data;
 
-  bool walk(boost::shared_ptr<void> & d, _Key key, int level, _Value **ret)
+  bool walk(boost::shared_ptr<void> & d, _Key key, int level, _Value ** ret)
   {
     if (!d)
       {
-	if (level > 0)
-	  d.reset(new middle_node_type());
-	else
-	  d.reset(new leaf_node_type());
+        if (level > 0)
+          d.reset(new middle_node_type());
+        else
+          d.reset(new leaf_node_type());
       }
     if (!d.unique())
       {
-	if (level > 0)
-	  d.reset(new middle_node_type(*boost::static_pointer_cast<middle_node_type>(d)));
-	else
-	  d.reset(new leaf_node_type(*boost::static_pointer_cast<leaf_node_type>(d)));
+        if (level > 0)
+          d.reset(new middle_node_type(*boost::static_pointer_cast<middle_node_type>(d)));
+        else
+          d.reset(new leaf_node_type(*boost::static_pointer_cast<leaf_node_type>(d)));
       }
     unsigned idx = (key >> (_Bits * level)) & mask;
     if (level > 0)
       return walk(boost::static_pointer_cast<middle_node_type>(d)->contents[idx],
-		  key, level-1, ret);
+                  key, level - 1, ret);
     else
       {
-	*ret = &boost::static_pointer_cast<leaf_node_type>(d)->contents[idx];
-	return true;
+        *ret = &boost::static_pointer_cast<leaf_node_type>(d)->contents[idx];
+        return true;
       }
   }
 
-  bool walk(boost::shared_ptr<void> const & d, _Key key, int level, _Value **ret) const
+  bool walk(boost::shared_ptr<void> const & d, _Key key, int level, _Value ** ret) const
   {
     if (!d)
       {
-	return false;
+        return false;
       }
     unsigned idx = (key >> (_Bits * level)) & mask;
     if (level > 0)
       return walk(boost::static_pointer_cast<middle_node_type>(d)->contents[idx],
-		  key, level-1, ret);
+                  key, level - 1, ret);
     else
       {
-	*ret = &boost::static_pointer_cast<leaf_node_type>(d)->contents[idx];
-	return true;
+        *ret = &boost::static_pointer_cast<leaf_node_type>(d)->contents[idx];
+        return true;
       }
   }
 public:
@@ -92,9 +92,10 @@ public:
     _count = 0;
     _data.reset();
   }
-  _Value const & set(_Key key, _Value const & value) {
-    _Value *p;
-    walk(_data, key, levels-1, &p);
+  _Value const & set(_Key key, _Value const & value)
+  {
+    _Value * p;
+    walk(_data, key, levels - 1, &p);
     bool b = (*p != _empty_value);
     bool a = (value != _empty_value);
     if (b && !a)
@@ -106,33 +107,35 @@ public:
   }
   bool set_if_missing(_Key key, _Value const & value)
   {
-    _Value *p;
-    walk(_data, key, levels-1, &p);
+    _Value * p;
+    walk(_data, key, levels - 1, &p);
     if (*p != _empty_value)
       return false;
     if (value != _empty_value)
       {
-	++_count;
-	*p = value;
+        ++_count;
+        *p = value;
       }
     return true;
   }
-  void unset(_Key key) {
+  void unset(_Key key)
+  {
     set(key, _empty_value);
   }
-  _Value const &get_if_present(_Key key) const {
-    _Value *p;
-    if (walk(_data, key, levels-1, &p))
+  _Value const & get_if_present(_Key key) const
+  {
+    _Value * p;
+    if (walk(_data, key, levels - 1, &p))
       return *p;
     else
       return _empty_value;
   }
   // This is actually not the same as above.
   // It's non-const, so it calls the other walk().
-  _Value const &get_unshared_if_present(_Key key)
+  _Value const & get_unshared_if_present(_Key key)
   {
-    _Value *p;
-    if (walk(_data, key, levels-1, &p))
+    _Value * p;
+    if (walk(_data, key, levels - 1, &p))
       return *p;
     else
       return _empty_value;
@@ -146,7 +149,7 @@ public:
       unsigned idx;
       bool operator==(stack_item const & other) const
       {
-	return ptr == other.ptr && idx == other.idx;
+        return ptr == other.ptr && idx == other.idx;
       }
     };
     std::vector<stack_item> stack;
@@ -154,13 +157,13 @@ public:
     explicit const_iterator(cow_trie const & t)
     {
       if (t._data)
-	{
-	  stack_item item;
-	  item.ptr = t._data;
-	  item.idx = (unsigned)-1;
-	  stack.push_back(item);
-	  ++(*this);
-	}
+        {
+          stack_item item;
+          item.ptr = t._data;
+          item.idx = (unsigned) - 1;
+          stack.push_back(item);
+          ++(*this);
+        }
     }
     _Value _empty_value;
   private:
@@ -178,41 +181,41 @@ public:
     const_iterator const & operator++()
     {
       while (!stack.empty())
-	{
-	  stack_item & item = stack.back();
-	  boost::shared_ptr<middle_node_type> middle
-	    = boost::static_pointer_cast<middle_node_type>(item.ptr);
-	  boost::shared_ptr<leaf_node_type> leaf
-	    = boost::static_pointer_cast<leaf_node_type>(item.ptr);
-	  for (++item.idx; item.idx < (1<<_Bits); ++item.idx)
-	    {
-	      if (stack.size() == levels)
-		{
-		  if (leaf->contents[item.idx] != _empty_value)
-		    {
-		      _ret.first = (_ret.first & ~mask) | item.idx;
-		      _ret.second = leaf->contents[item.idx];
-		      return *this;
-		    }
-		}
-	      else
-		{
-		  if (middle->contents[item.idx])
-		    {
-		      int shifts = levels - stack.size();
-		      int bits = shifts * _Bits;
-		      _ret.first = (_ret.first & ~(mask<<bits)) | (item.idx<<bits);
-		      stack_item i;
-		      i.ptr = middle->contents[item.idx];
-		      i.idx = (unsigned)-1;
-		      stack.push_back(i);
-		      break;
-		    }
-		}
-	    }
-	  if (item.idx == (1 << _Bits))
-	    stack.pop_back();
-	}
+        {
+          stack_item & item = stack.back();
+          boost::shared_ptr<middle_node_type> middle
+          = boost::static_pointer_cast<middle_node_type>(item.ptr);
+          boost::shared_ptr<leaf_node_type> leaf
+          = boost::static_pointer_cast<leaf_node_type>(item.ptr);
+          for (++item.idx; item.idx < (1 << _Bits); ++item.idx)
+            {
+              if (stack.size() == levels)
+                {
+                  if (leaf->contents[item.idx] != _empty_value)
+                    {
+                      _ret.first = (_ret.first & ~mask) | item.idx;
+                      _ret.second = leaf->contents[item.idx];
+                      return *this;
+                    }
+                }
+              else
+                {
+                  if (middle->contents[item.idx])
+                    {
+                      int shifts = levels - stack.size();
+                      int bits = shifts * _Bits;
+                      _ret.first = (_ret.first & ~(mask << bits)) | (item.idx << bits);
+                      stack_item i;
+                      i.ptr = middle->contents[item.idx];
+                      i.idx = (unsigned) - 1;
+                      stack.push_back(i);
+                      break;
+                    }
+                }
+            }
+          if (item.idx == (1 << _Bits))
+            stack.pop_back();
+        }
       return *this;
     }
     value_type const & operator*() const
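
(The cow_trie.hh hunks above are reindentation only. The logic they touch
splits a key into _Bits-wide chunks, using mask = (1 << _Bits) - 1 and one
trie level per chunk, most significant chunk first. A minimal sketch of that
decomposition follows; the names Bits, Mask and print_trie_path are
illustrative, not part of cow_trie.hh.)

  #include <cstdio>

  const unsigned Bits = 4;                  // chunk width per level (illustrative)
  const unsigned Mask = (1u << Bits) - 1;   // same role as cow_trie's 'mask'

  // Print the child slot chosen at each trie level for a given key.
  void print_trie_path(unsigned key)
  {
    int const levels = (sizeof(key) * 8 + Bits - 1) / Bits;  // ceil(key bits / Bits)
    for (int level = levels - 1; level >= 0; --level)
      {
        unsigned idx = (key >> (Bits * level)) & Mask;  // child slot at this depth
        std::printf("level %d -> slot %u\n", level, idx);
      }
  }

(With Bits = 4 and a 32-bit key this gives eight levels, matching
levels = (sizeof(_Key) * 8 + _Bits - 1) / _Bits in the header.)
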
============================================================
--- src/maybe_workspace_updater.cc	f1d41132d47052f6b9f54980de4feb21348e715a
+++ src/maybe_workspace_updater.cc	c1ee022eb8ee6e62581d88448950f245f8fedcf6
@@ -20,7 +20,8 @@ void update(app_state & app, args_vector
 // defined in cmd_merging.cc
 void update(app_state & app, args_vector const & args);
 
-namespace {
+namespace
+{
   enum updatability { is_head, is_not_head, not_updatable };
   updatability get_updatability(app_state & app, project_t & project)
   {
@@ -35,7 +36,7 @@ namespace {
     work.get_options(workspace_opts);
     std::set<revision_id> heads;
     project.get_branch_heads(workspace_opts.branch, heads, false);
-    
+
     revision_id parent = edge_old_revision(rev.edges.begin());
     if (heads.find(parent) != heads.end())
       return is_head;
============================================================
--- src/option_reset_info.hh	a11b67e3d5c52197a2228224b8927b2d27471b3a
+++ src/option_reset_info.hh	b699f762793a213437bd8619ac11a8d22b32b54f
@@ -14,7 +14,8 @@
 
 #include "option.hh"
 
-namespace commands {
+namespace commands
+{
   class command;
 }
 
============================================================
--- src/automate_stdio_helpers.hh	b9eaf1c16872c7b18e37edd03b5bfb56b47e690e
+++ src/automate_stdio_helpers.hh	13651121e555f69e325843fe7de6a117322abadd
@@ -26,7 +26,7 @@ public:
   static void
   automate_stdio_shared_setup(app_state & app,
                               std::vector<std::string> const & cmdline,
-                              std::vector<std::pair<std::string,std::string> >
+                              std::vector<std::pair<std::string, std::string> >
                               const * const params,
                               commands::command_id & id,
                               /* reference-to-pointer here is intentional */
@@ -35,7 +35,7 @@ public:
   static std::pair<int, std::string>
   automate_stdio_shared_body(app_state & app,
                              std::vector<std::string> const & cmdline,
-                             std::vector<std::pair<std::string,std::string> >
+                             std::vector<std::pair<std::string, std::string> >
                              const & params,
                              std::ostream & os,
                              boost::function<void()> init_fn,
============================================================
--- src/options_applicator.cc	5407017eb222eb329f9e22fc465d4f36743d37ee
+++ src/options_applicator.cc	315da3c09be39431aa9bb19e69c0e28c993e5b59
@@ -25,7 +25,7 @@ options_applicator::options_applicator(o
 };
 
 options_applicator::options_applicator(options const & opts,
-				       options_applicator::for_what what)
+                                       options_applicator::for_what what)
   : _impl(new options_applicator_impl())
 {
   _impl->what = what;
@@ -47,15 +47,15 @@ options_applicator::options_applicator(o
   else
     {
       if (opts.ticker == "none")
-	ui.set_tick_write_nothing();
+        ui.set_tick_write_nothing();
       else if (opts.ticker == "dot")
-	ui.set_tick_write_dot();
+        ui.set_tick_write_dot();
       else if (opts.ticker == "count")
-	ui.set_tick_write_count();
+        ui.set_tick_write_count();
       else if (opts.ticker == "stdio")
-	ui.set_tick_write_stdio();
+        ui.set_tick_write_stdio();
       else
-	I(opts.ticker.empty());
+        I(opts.ticker.empty());
     }
 }
 
============================================================
--- src/key_packet.cc	d1306df89dd684badac02c03744cd446381c07d3
+++ src/key_packet.cc	50ba70750429646d0928838a45482198db962cf2
@@ -75,7 +75,7 @@ namespace
 namespace
 {
   struct
-  feed_key_packet_consumer : public origin_aware
+    feed_key_packet_consumer : public origin_aware
   {
     size_t & count;
     key_packet_consumer & cons;
@@ -116,7 +116,7 @@ namespace
         {
           E(false, origin::user,
             F("malformed key_packet: invalid public key data for '%s': %s")
-              % name % e.what());
+            % name % e.what());
         }
     }
     void validate_private_key_data(string const & name, string const & keydata) const
@@ -135,7 +135,7 @@ namespace
         {
           E(false, origin::user,
             F("malformed key_packet: invalid private key data for '%s': %s")
-              % name % e.what());
+            % name % e.what());
         }
       // since we do not want to prompt for a password to decode it finally,
       // we ignore all other exceptions
@@ -149,7 +149,7 @@ namespace
         F("malformed key_packet: too many arguments in header"));
     }
 
-    static void read_rest(istream& in, string& dest)
+    static void read_rest(istream & in, string & dest)
     {
 
       while (true)
@@ -176,7 +176,7 @@ namespace
       L(FL("read keypair key_packet"));
       string::size_type hashpos = body.find('#');
       string pub(body, 0, hashpos);
-      string priv(body, hashpos+1);
+      string priv(body, hashpos + 1);
 
       validate_key(args);
       validate_base64(pub);
@@ -226,7 +226,8 @@ extract_key_packets(string const & s, ke
 
   string::const_iterator p, tbeg, tend, abeg, aend, bbeg, bend;
 
-  enum extract_state {
+  enum extract_state
+  {
     skipping, open_bracket, scanning_type, found_type,
     scanning_args, found_args, scanning_body,
     end_1, end_2, end_3, end_4, end_5
@@ -295,7 +296,7 @@ static size_t
 // this is same as rfind, but search area is haystack[start:] (from start to end of string)
 // haystack is searched, needle is pattern
 static size_t
-rfind_in_substr(std::string const& haystack, size_t start, std::string const& needle)
+rfind_in_substr(std::string const & haystack, size_t start, std::string const & needle)
 {
   I(start <= haystack.size());
   const std::string::const_iterator result =
@@ -319,7 +320,7 @@ read_key_packets(istream & in, key_packe
   while(in)
     {
       size_t const next_search_pos = (accum.size() >= end.size())
-                                      ? accum.size() - end.size() : 0;
+                                     ? accum.size() - end.size() : 0;
       in.read(buf, bufsz);
       accum.append(buf, in.gcount());
       string::size_type endpos = string::npos;
@@ -330,7 +331,7 @@ read_key_packets(istream & in, key_packe
           string tmp = accum.substr(0, endpos);
           count += extract_key_packets(tmp, cons);
           if (endpos < accum.size() - 1)
-            accum = accum.substr(endpos+1);
+            accum = accum.substr(endpos + 1);
           else
             accum.clear();
         }
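
(The key_packet.cc hunks are again whitespace-only. The loop they touch reads
the input in fixed-size chunks and, before each read, backs the search
position up by the end-marker length so a marker split across two reads is
still found. A rough standalone sketch of that pattern; read_blocks,
process_block and the buffer size are assumptions, not monotone's API.)

  #include <cstddef>
  #include <istream>
  #include <string>

  // Count/process marker-terminated blocks read from 'in' in chunks.
  std::size_t read_blocks(std::istream & in,
                          std::string const & end_marker,
                          std::size_t (*process_block)(std::string const &))
  {
    static std::size_t const bufsz = 8192;
    char buf[bufsz];
    std::string accum;
    std::size_t count = 0;
    while (in)
      {
        // Back the search start up by the marker length so a marker that
        // straddles the previous chunk boundary is not missed.
        std::size_t const next_search_pos = (accum.size() >= end_marker.size())
                                            ? accum.size() - end_marker.size() : 0;
        in.read(buf, bufsz);
        accum.append(buf, in.gcount());
        std::string::size_type endpos = accum.find(end_marker, next_search_pos);
        if (endpos != std::string::npos)
          {
            endpos += end_marker.size();
            count += process_block(accum.substr(0, endpos));  // complete block(s)
            accum.erase(0, endpos);                           // keep the remainder
          }
      }
    return count;
  }
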
============================================================
--- src/date_format.hh	2100f7a1854afa9573d3991e41bbdc36f75c7e5b
+++ src/date_format.hh	0d066f33ef46ae7aebb2fd3f603d77e8578f3f50
@@ -17,8 +17,8 @@ get_date_format(options const & opts,
 
 inline std::string
 get_date_format(options const & opts,
-		lua_hooks & lua,
-		date_format_spec spec)
+                lua_hooks & lua,
+                date_format_spec spec)
 {
   std::string date_fmt;
   if (!opts.no_format_dates)
============================================================
--- test/func/serve-automate/__driver__.lua	d41f819fd49060c45bb154dde614699b0cb938e7
+++ test/func/serve-automate/__driver__.lua	ae4b03baf16d8401f31a32c8377021f879788d01
@@ -15,7 +15,7 @@ check(
 local errors = run_remote_stdio(server, "l17:interface_versione", 1, 0, "e")
 check(
     table.maxn(errors) == 1 and
-    errors[1] == "misuse: Sorry, you aren't allowed to do that."
+    errors[1] == "misuse: sorry, you aren't allowed to do that."
 )
 
 server:stop()
============================================================
--- test/func/db_opt_fallback_mechanisms/__driver__.lua	09354a970b921effa2850aa4ad722ce95a433ea8
+++ test/func/db_opt_fallback_mechanisms/__driver__.lua	0b0da944f19f7a0be42165069fdc1bb8862fd6ee
@@ -14,4 +14,4 @@ check(raw_mtn("au", "remote", "interface
 -- and some commands should use :memory: as default because they
 -- just need a temporary throw-away database to work properly
 check(raw_mtn("au", "remote", "interface_version", "--remote-stdio-host", "http://code.monotone.ca/monotone", "--key="), 0, false, true)
-check(qgrep("No database given; assuming ':memory:' database", "stderr"))
+check(qgrep("no database given; assuming ':memory:' database", "stderr"))
