[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[GNUnet-SVN] r3601 - in GNUnet: . src/applications/sqstore_sqlite
From: grothoff
Subject: [GNUnet-SVN] r3601 - in GNUnet: . src/applications/sqstore_sqlite
Date: Wed, 1 Nov 2006 11:44:35 -0800 (PST)
Author: grothoff
Date: 2006-11-01 11:44:32 -0800 (Wed, 01 Nov 2006)
New Revision: 3601
Modified:
GNUnet/ChangeLog
GNUnet/src/applications/sqstore_sqlite/sqlite.c
GNUnet/src/applications/sqstore_sqlite/sqlitetest2.c
GNUnet/todo
Log:
sqlite improvements
Modified: GNUnet/ChangeLog
===================================================================
--- GNUnet/ChangeLog 2006-11-01 19:38:32 UTC (rev 3600)
+++ GNUnet/ChangeLog 2006-11-01 19:44:32 UTC (rev 3601)
@@ -1,3 +1,10 @@
+Wed Nov 1 13:09:53 MST 2006
+ Fixed some problems with index creation in sqlite
+ datastore (discovered with new sqlite benchmarking
+ code). Performance should improve significantly
+ (observed was a factor of 4-10 depending on
+ database size and operation).
+
Thu Oct 19 23:44:24 MDT 2006
Completed huge update to FSUI API (not fully debugged).
Major changes include:
Modified: GNUnet/src/applications/sqstore_sqlite/sqlite.c
===================================================================
--- GNUnet/src/applications/sqstore_sqlite/sqlite.c 2006-11-01 19:38:32 UTC
(rev 3600)
+++ GNUnet/src/applications/sqstore_sqlite/sqlite.c 2006-11-01 19:44:32 UTC
(rev 3601)
@@ -132,6 +132,10 @@
(const char**) &dummy);
}
+// #define CHECK(a) GE_BREAK(ectx, a)
+#define ENULL &e
+#define CHECK(a) if (! a) { fprintf(stderr, "%s\n", e); sqlite3_free(e); }
+
/**
* @brief Get a database handle for this thread.
* @note SQLite handles may not be shared between threads - see
@@ -143,6 +147,7 @@
unsigned int idx;
sqliteHandle * ret;
sqlite3_stmt * stmt;
+ char * e;
/* Is the DB already open? */
for (idx = 0; idx < db->handle_count; idx++)
@@ -162,25 +167,19 @@
return NULL;
}
- if (db->handle_count == 1) {
- /* first open: create indices! */
- sqlite3_exec(ret->dbh, "CREATE INDEX idx_hash ON gn070 (hash)",
- NULL, NULL, NULL);
- sqlite3_exec(ret->dbh, "CREATE INDEX idx_prio ON gn070 (prio)",
- NULL, NULL, NULL);
- sqlite3_exec(ret->dbh, "CREATE INDEX idx_expire ON gn070 (expire)",
- NULL, NULL, NULL);
- sqlite3_exec(ret->dbh, "CREATE INDEX idx_comb1 ON gn070
(prio,expire,hash)",
- NULL, NULL, NULL);
- sqlite3_exec(ret->dbh, "CREATE INDEX idx_comb2 ON gn070
(expire,prio,hash)",
- NULL, NULL, NULL);
- }
-
- sqlite3_exec(ret->dbh, "PRAGMA temp_store=MEMORY", NULL, NULL, NULL);
- sqlite3_exec(ret->dbh, "PRAGMA synchronous=OFF", NULL, NULL, NULL);
- sqlite3_exec(ret->dbh, "PRAGMA count_changes=OFF", NULL, NULL, NULL);
- sqlite3_exec(ret->dbh, "PRAGMA page_size=4096", NULL, NULL, NULL);
-
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "PRAGMA temp_store=MEMORY", NULL, NULL, ENULL));
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "PRAGMA synchronous=OFF", NULL, NULL, ENULL));
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "PRAGMA count_changes=OFF", NULL, NULL, ENULL));
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "PRAGMA page_size=4092", NULL, NULL, ENULL));
+
/* We have to do it here, because otherwise precompiling SQL might fail */
sq_prepare(ret->dbh,
"Select 1 from sqlite_master where tbl_name = 'gn070'",
@@ -203,6 +202,27 @@
FREE(ret);
return NULL;
}
+ /* create indices */
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "CREATE INDEX idx_hash ON gn070 (hash)",
+ NULL, NULL, ENULL));
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "CREATE INDEX idx_prio ON gn070 (prio)",
+ NULL, NULL, ENULL));
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "CREATE INDEX idx_expire ON gn070 (expire)",
+ NULL, NULL, ENULL));
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "CREATE INDEX idx_comb1 ON gn070 (prio,expire,hash)",
+ NULL, NULL, ENULL));
+ CHECK(SQLITE_OK ==
+ sqlite3_exec(ret->dbh,
+ "CREATE INDEX idx_comb2 ON gn070 (expire,prio,hash)",
+ NULL, NULL, ENULL));
}
sqlite3_finalize(stmt);
@@ -283,7 +303,14 @@
if (stats)
stats->set(stat_size, ret);
MUTEX_UNLOCK(db->DATABASE_Lock_);
- return ret;
+ return ret * 1.02;
+ /* benchmarking shows 12% overhead, this is
+ most likely related to the benchmark setup
+ of adding 10% more than quota at a time
+ before cleaning up; overhead without this
+ is usually around 2% during pure insertion;
+ so we take the 2% here, which for frequent
+ cleaning up should be sufficiently accurate */
}
/**
@@ -500,8 +527,8 @@
http://permalink.gmane.org/gmane.network.gnunet.devel/1363 */
strcpy(scratch,
"SELECT size, type, prio, anonLevel, expire, hash, value FROM gn070"
- " where rowid in (Select rowid from gn070"
- " WHERE ((hash > :1 AND expire == :2 AND prio == :3) OR ");
+ " where rowid in (Select rowid from gn070"
+ " WHERE ((expire == :2 AND prio == :3 AND hash > :1) OR ");
if (sortByPriority)
strcat(scratch,
"(expire > :4 AND prio == :5) OR prio > :6)");
Modified: GNUnet/src/applications/sqstore_sqlite/sqlitetest2.c
===================================================================
--- GNUnet/src/applications/sqstore_sqlite/sqlitetest2.c 2006-11-01
19:38:32 UTC (rev 3600)
+++ GNUnet/src/applications/sqstore_sqlite/sqlitetest2.c 2006-11-01
19:44:32 UTC (rev 3601)
@@ -22,14 +22,13 @@
* @brief Test for the sqstore implementations.
* @author Christian Grothoff
*
- * This testcase inserts a bunch of (variable size) data and then deletes
- * data until the (reported) database size drops below a given threshold.
- * This is iterated 10 times, with the actual size of the content stored,
- * the database size reported and the file size on disk being printed for
- * each iteration. The code also prints a "I" for every 40 blocks
- * inserted and a "D" for every 40 blocks deleted. The deletion
- * strategy alternates between "lowest priority" and "earliest expiration".
- * Priorities and expiration dates are set using a pseudo-random value
+ * This testcase inserts a bunch of (variable size) data and then
+ * deletes data until the (reported) database size drops below a given
+ * threshold. This is iterated 10 times, with the actual size of the
+ * content stored, the database size reported and the file size on
+ * disk being printed for each iteration. The deletion strategy
+ * alternates between "lowest priority" and "earliest expiration".
+ * Priorities and expiration dates are set using a pseudo-random value
* within a realistic range.
*/
@@ -45,41 +44,12 @@
#define ASSERT(x) do { if (! (x)) { printf("Error at %s:%d\n", __FILE__,
__LINE__); goto FAILURE;} } while (0)
/**
- * Target datastore size (in bytes).
- * <p>
- *
- * Example impact of total size on the reported number
- * of operations (insert and delete) per second (once
- * stabilized) for a particular machine:
- * <pre>
- * 1: 824
- * 2: 650
- * 4: 350
- * 8: 343
- * 16: 230
- * 32: 131
- * 64: 70
- * 128: 47
- * </pre>
- * <p>
- * This was measured on a machine with 4 GB main
- * memory and under 100% CPU load, so disk overhead
- * is NOT relevant for the performance loss for
- * larger sizes. This seems to indicate that
- * at least one of the performed operation does not
- * yet scale to larger sizes! This is most likely
- * the delete operation -- initial pure insertion
- * peaks at about 2500 operations per second!<br>
- *
- * <p>
- * The disk size overhead (additional disk space used
- * compared to actual data stored) for all sizes
- * was around 12-17%. The API-reported size was
- * 12.74 bytes per entry more than the actual data
- * stored (which is a good estimate of the module's
- * internal overhead).
+ * Target datastore size (in bytes). Realistic sizes are
+ * more like 16 GB (not the default of 16 MB); however,
+ * those take too long to run them in the usual "make check"
+ * sequence. Hence the value used for shipping is tiny.
*/
-#define MAX_SIZE 1024 * 1024 * 4
+#define MAX_SIZE 1024LL * 1024 * 16
/**
* Report progress outside of major reports? Should probably be YES if
@@ -186,11 +156,13 @@
* Add testcode here!
*/
static int test(SQstore_ServiceAPI * api) {
+ unsigned long long lops;
int i;
int j;
unsigned long long size;
int have_file;
+ lops = 0;
have_file = OK == disk_file_test(NULL,
DB_NAME);
@@ -207,9 +179,9 @@
/* trim down below MAX_SIZE again */
if ((i % 2) == 0)
- api->iterateLowPriority(0, &iterateDelete, api);
- else
- api->iterateExpirationTime(0, &iterateDelete, api);
+ api->iterateLowPriority(0, &iterateDelete, api);
+ else
+ api->iterateExpirationTime(0, &iterateDelete, api);
/* every 10 iterations print status */
size = 0;
@@ -222,15 +194,16 @@
#if REPORT_ID
"\n"
#endif
- "Useful %llu, API %llu (Useful-API: %lld/%.2f), disk %llu (%.2f%%) /
%lluk ops / %llu ops/s\n",
+ "%u: Useful %llu, API %llu, disk %llu (%.2f%%) / %lluk ops / %llu
ops/s\n",
+ i,
stored_bytes / 1024, /* used size in k */
api->getSize() / 1024, /* API-reported size in k */
- (api->getSize() - stored_bytes) / 1024, /* difference between
reported and used */
- 1.0 * (api->getSize() - stored_bytes) / (stored_entries *
sizeof(Datastore_Value)), /* relative to number of entries (should be equal to
internal overhead per entry) */
size / 1024, /* disk size in kb */
(100.0 * size / stored_bytes) - 100, /* overhead */
(stored_ops * 2 - stored_entries) / 1024, /* total operations (in k)
*/
- 1000 * (stored_ops * 2 - stored_entries) / (1 + get_time() -
start_time)); /* operations per second */
+ 1000 * ((stored_ops * 2 - stored_entries) - lops) / (1 + get_time()
- start_time)); /* operations per second */
+ lops = stored_ops * 2 - stored_entries;
+ start_time = get_time();
if (GNUNET_SHUTDOWN_TEST() == YES)
break;
}
Modified: GNUnet/todo
===================================================================
--- GNUnet/todo 2006-11-01 19:38:32 UTC (rev 3600)
+++ GNUnet/todo 2006-11-01 19:44:32 UTC (rev 3601)
@@ -50,8 +50,13 @@
- Mantis #0001126
+ ncurses wizard
- fix critical known bugs (see Mantis for updates):
- * disk quota violations (#1128) [CG, RC]
- * high CPU usage (quota exceeded) #1123 [CG, RC]
+ * disk quota violations (#1128) [CG, RCpre0]
+ - mysql estimate function is WAY off
+ => Benchmark (mysqltest2) for
+ those problems now exist! => easy to fix!
+ Likely related: high CPU usage (quota exceeded) #1123 [CG, RC]
+ - need to check: why quota-deletion happens during startup
+ - need to check: adding missing indices during
gnunet-update
* Windows installer, uninstall: Remove account [Nils, RC]
* file/socket leak (#955) - possibly fixed
- More testcases: [RCpre3]
[Prev in Thread] | Current Thread | [Next in Thread]
- [GNUnet-SVN] r3601 - in GNUnet: . src/applications/sqstore_sqlite,
grothoff <=