diff --git a/CHANGELOG b/CHANGELOG
index a1876ef2d4f7c43d80a4de24bee9b4add34f0297..ff1ba240a8b79fece269320c5824655f27eb4f31 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,4 +1,17 @@
-0.4.5 Apr 4:
+0.5.1 May 16, 2014:
+Client:
+  o Close connection with server after syncing to avoid the client
+    hanging on exit. Fixes #5507.
+
+Common:
+  o Properly close connections on couch backend. Also prevent file
+    descriptor leaks in tests. Closes #5493.
+  o Prevent couch backend from always trying to create the
+    database. Fixes #5386.
+  o Prevent Couch Server State from making an unneeded GET request on
+    instantiation. Fixes #5386.
+
+0.5.0 Apr 4, 2014:
 Client:
   o Catch lock timeout exception. Fixes #4435.
   o Add lock for create_doc and update_indexes call, prevents
@@ -62,9 +75,7 @@ Server:
     Server application. Fixes #5302.
   o Enable Gzip compression on the soledad wsgi app.
 
--- 2014 --
-
-0.4.4 Dec 6:
+0.4.4 Dec 6, 2013:
 Client:
   o Add MAC verification to the recovery document and
     soledad.json. Closes #4348.
@@ -72,14 +83,14 @@ Common:
   o Add unicode conversion to put_doc(). Closes #4095.
   o Remove tests dependency on nose2. Closes #4258.
 
-0.4.3 Nov 15:
+0.4.3 Nov 15, 2013:
 Client:
   o Defaults detected encoding to utf-8 to avoid bug if detected
     encoding is None. Closes: #4417
   o Open db in autocommit mode, to avoid nested transactions problems.
     Closes: #4400
 
-0.4.2 Nov 1:
+0.4.2 Nov 1, 2013:
 Client:
   o Support non-ascii passwords. Closes #4001.
   o Change error severity for missing secrets path.
@@ -93,11 +104,11 @@ Common:
 Server:
   o Allow for locking the shared database. Closes #4097.
 
-0.4.1 Oct 4:
+0.4.1 Oct 4, 2013:
 Client:
   o Save only UTF8 strings. Related to #3660.
 
-0.4.0 Sep 20:
+0.4.0 Sep 20, 2013:
 Client:
   o Remove redundant logging when creating data dirs.
 Server:
@@ -107,14 +118,14 @@ Common:
   o Turn couchdb dependency for common into optional. Closes #2167.
   o Add verification for couch permissions. Closes #3501.
 
-0.3.2 Sep 6:
+0.3.2 Sep 6, 2013:
 Client:
   o Use dirspec instead of plain xdg. Closes #3574.
 Server:
   o Fix the init script for soledad server so that it uses the new
     package namespace.
 
-0.3.1 Aug 23:
+0.3.1 Aug 23, 2013:
 Client:
   o Add libsqlite3-dev requirement for soledad.
   o Check for None in private methods that depend on _db. Closes:
@@ -134,7 +145,7 @@ Common:
     #3487.
   o Add versioneer, parse_requirements
 
-0.3.0 Aug 9:
+0.3.0 Aug 9, 2013:
 Client:
   o Thread safe wrapper for pysqlcipher.
   o Fix a couple of typos that prevented certain functionality to
@@ -143,7 +154,7 @@ Server:
   o A plaintext port is not opened by soledad server initscript call
     to twistd web anymore. Closes #3254.
 
-0.2.3 Jul 26:
+0.2.3 Jul 26, 2013:
 Client:
   o Avoid possible timing attack in document's mac comparison by
     comparing hashes instead of plain macs. Closes #3243.
@@ -154,13 +165,13 @@ Server:
     pointing the PRIVKEY_PATH to /etc/leap/soledad-server.key. Fixes
     #3174.
 
-0.2.2 Jul 12:
+0.2.2 Jul 12, 2013:
 Client:
   o Add method for password change.
 Server:
   o Use the right name as the WSGI server
 
-0.2.1 Jun 28:
+0.2.1 Jun 28, 2013:
 Client:
   o Do not list the backends in the __init__'s __all__ to allow not
     supporting couch on the client side until the code is divided into
diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py
index 3aea340d389f7fc56585cd6bfde35c3a43b8510e..04f8ebf975a13fb590234b654507b36e364589b0 100644
--- a/client/src/leap/soledad/client/sqlcipher.py
+++ b/client/src/leap/soledad/client/sqlcipher.py
@@ -43,12 +43,15 @@ So, as the statements above were introduced for backwards compatibility with
 SQLCipher 1.1 databases, we do not implement them as all SQLCipher databases
 handled by Soledad should be created by SQLCipher >= 2.0.
 """
-import httplib
 import logging
 import os
 import string
 import threading
 import time
+import json
+
+from hashlib import sha256
+from contextlib import contextmanager
 
 from pysqlcipher import dbapi2
 from u1db.backends import sqlite_backend
@@ -149,7 +152,6 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
     k_lock = threading.Lock()
     create_doc_lock = threading.Lock()
     update_indexes_lock = threading.Lock()
-    _syncer = None
 
     def __init__(self, sqlcipher_file, password, document_factory=None,
                  crypto=None, raw_key=False, cipher='aes-256-cbc',
@@ -211,6 +213,7 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
                                    has_conflicts=has_conflicts,
                                    syncable=syncable)
         self.set_document_factory(factory)
+        self._syncers = {}
 
     @classmethod
     def _open_database(cls, sqlcipher_file, password, document_factory=None,
@@ -351,46 +354,46 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
         :return: The local generation before the synchronisation was performed.
         :rtype: int
         """
-        if not self.syncer:
-            self._create_syncer(url, creds=creds)
-
-        try:
-            res = self.syncer.sync(autocreate=autocreate)
-        except httplib.CannotSendRequest:
-            # raised when you reuse httplib.HTTP object for new request
-            # while you havn't called its getresponse()
-            # this catch works for the current connclass used
-            # by our HTTPClientBase, since it uses httplib.
-            # we will have to replace it if it changes.
-            logger.info("Replacing connection and trying again...")
-            self._syncer = None
-            self._create_syncer(url, creds=creds)
-            res = self.syncer.sync(autocreate=autocreate)
+        res = None
+        with self.syncer(url, creds=creds) as syncer:
+            res = syncer.sync(autocreate=autocreate)
         return res
 
-    @property
-    def syncer(self):
+    @contextmanager
+    def syncer(self, url, creds=None):
         """
         Accessor for synchronizer, provided as a context manager that
         closes the sync target on exit.
         """
-        return self._syncer
+        syncer = self._get_syncer(url, creds=creds)
+        yield syncer
+        syncer.sync_target.close()
 
-    def _create_syncer(self, url, creds=None):
+    def _get_syncer(self, url, creds=None):
         """
-        Creates a synchronizer
+        Get a synchronizer for C{url} using C{creds}.
 
         :param url: The url of the target replica to sync with.
         :type url: str
         :param creds: optional dictionary giving credentials
-            to authorize the operation with the server.
+                      to authorize the operation with the server.
         :type creds: dict
+
+        :return: A synchronizer.
+        :rtype: u1db.sync.Synchronizer
         """
-        if self._syncer is None:
-            self._syncer = Synchronizer(
+        # we want to store at most one syncer for each url, so we also store a
+        # hash of the connection credentials and replace the stored syncer for
+        # a certain url if credentials have changed.
+        h = sha256(json.dumps([url, creds])).hexdigest()
+        cur_h, syncer = self._syncers.get(url, (None, None))
+        if syncer is None or h != cur_h:
+            syncer = Synchronizer(
                 self,
                 SoledadSyncTarget(url,
                                   creds=creds,
                                   crypto=self._crypto))
+            self._syncers[url] = (h, syncer)
+        return syncer
 
     def _extra_schema_init(self, c):
         """
diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py
index 8e8613a14a5c7807a9849d0a1cf400e500b056e7..0aa841706ddbcf5f249340ab07e1c266e106c9fb 100644
--- a/common/src/leap/soledad/common/couch.py
+++ b/common/src/leap/soledad/common/couch.py
@@ -31,14 +31,16 @@ import threading
 
 from StringIO import StringIO
 from collections import defaultdict
+from urlparse import urljoin
+from contextlib import contextmanager
 
 
-from couchdb.client import Server
+from couchdb.client import Server, Database
 from couchdb.http import (
     ResourceConflict,
     ResourceNotFound,
     ServerError,
-    Session,
+    Session as CouchHTTPSession,
 )
 from u1db import query_parser, vectorclock
 from u1db.errors import (
@@ -331,6 +333,35 @@ class MultipartWriter(object):
                 self.headers[name] = value
 
 
+class Session(CouchHTTPSession):
+    """
+    An HTTP session that can be closed.
+    """
+
+    def close_connections(self):
+        for key, conns in list(self.conns.items()):
+            for conn in conns:
+                conn.close()
+
+
+@contextmanager
+def couch_server(url):
+    """
+    Provide a connection to a couch server and clean up after use.
+
+    For database creation and deletion we use an ephemeral connection to the
+    couch server. That connection has to be properly closed, so we provide it
+    as a context manager.
+
+    :param url: The URL of the Couch server.
+    :type url: str
+    """
+    session = Session(timeout=COUCH_TIMEOUT)
+    server = Server(url=url, session=session)
+    yield server
+    session.close_connections()
+
+
 class CouchDatabase(CommonBackend):
     """
     A U1DB implementation that uses CouchDB as its persistence layer.
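
Because couch_server() is a context manager, callers get an ephemeral Server
handle whose pooled connections are closed on exit. A hedged usage sketch
(the url and database name are illustrative and assume a locally reachable
couch instance):

    from leap.soledad.common.couch import couch_server

    url = 'http://localhost:5984'
    with couch_server(url) as server:
        server.create('tmp-db')   # create a throwaway database
    with couch_server(url) as server:
        del server['tmp-db']      # drop it again
    # leaving each block runs session.close_connections()
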
@@ -353,7 +384,7 @@ class CouchDatabase(CommonBackend):
                      release_fun):
             """
             :param db: The database from where to get the document.
-            :type db: u1db.Database
+            :type db: CouchDatabase
             :param doc_id: The doc_id of the document to be retrieved.
             :type doc_id: str
             :param check_for_conflicts: Whether the get_doc() method should
@@ -380,7 +411,7 @@ class CouchDatabase(CommonBackend):
             self._release_fun()
 
     @classmethod
-    def open_database(cls, url, create, ensure_ddocs=False):
+    def open_database(cls, url, create, replica_uid=None, ensure_ddocs=False):
         """
         Open a U1DB database using CouchDB as backend.
 
@@ -388,6 +419,8 @@ class CouchDatabase(CommonBackend):
         :type url: str
         :param create: should the replica be created if it does not exist?
         :type create: bool
+        :param replica_uid: an optional unique replica identifier
+        :type replica_uid: str
         :param ensure_ddocs: Ensure that the design docs exist on server.
         :type ensure_ddocs: bool
 
@@ -400,16 +433,16 @@ class CouchDatabase(CommonBackend):
             raise InvalidURLError
         url = m.group(1)
         dbname = m.group(2)
-        server = Server(url=url)
-        try:
-            server[dbname]
-        except ResourceNotFound:
-            if not create:
-                raise DatabaseDoesNotExist()
-        return cls(url, dbname, ensure_ddocs=ensure_ddocs)
+        with couch_server(url) as server:
+            try:
+                server[dbname]
+            except ResourceNotFound:
+                if not create:
+                    raise DatabaseDoesNotExist()
+                server.create(dbname)
+        return cls(url, dbname, replica_uid=replica_uid,
+                   ensure_ddocs=ensure_ddocs)
 
-    def __init__(self, url, dbname, replica_uid=None, full_commit=True,
-                 session=None, ensure_ddocs=True):
+    def __init__(self, url, dbname, replica_uid=None, ensure_ddocs=True):
         """
         Create a new Couch data container.
 
@@ -419,31 +452,19 @@ class CouchDatabase(CommonBackend):
         :type dbname: str
         :param replica_uid: an optional unique replica identifier
         :type replica_uid: str
-        :param full_commit: turn on the X-Couch-Full-Commit header
-        :type full_commit: bool
-        :param session: an http.Session instance or None for a default session
-        :type session: http.Session
         :param ensure_ddocs: Ensure that the design docs exist on server.
         :type ensure_ddocs: bool
         """
         # save params
         self._url = url
-        self._full_commit = full_commit
-        if session is None:
-            session = Session(timeout=COUCH_TIMEOUT)
-        self._session = session
+        self._session = Session(timeout=COUCH_TIMEOUT)
         self._factory = CouchDocument
         self._real_replica_uid = None
         # configure couch
-        self._server = Server(url=self._url,
-                              full_commit=self._full_commit,
-                              session=self._session)
         self._dbname = dbname
-        try:
-            self._database = self._server[self._dbname]
-        except ResourceNotFound:
-            self._server.create(self._dbname)
-            self._database = self._server[self._dbname]
+        self._database = Database(
+            urljoin(self._url, self._dbname),
+            session=self._session)
         if replica_uid is not None:
             self._set_replica_uid(replica_uid)
         if ensure_ddocs:
@@ -482,7 +503,9 @@ class CouchDatabase(CommonBackend):
         """
         Delete a U1DB CouchDB database.
         """
-        del(self._server[self._dbname])
+        with couch_server(self._url) as server:
+            del(server[self._dbname])
+        self.close_connections()
 
     def close(self):
         """
@@ -491,13 +514,26 @@ class CouchDatabase(CommonBackend):
         :return: True if db was successfully closed.
         :rtype: bool
         """
+        self.close_connections()
         self._url = None
-        self._full_commit = None
         self._session = None
-        self._server = None
         self._database = None
         return True
 
+    def close_connections(self):
+        """
+        Close all open connections to the couch server.
+        """
+        if self._session is not None:
+            self._session.close_connections()
+
+    def __del__(self):
+        """
+        Close the database upon garbage collection.
+        """
+        self.close()
+
     def _set_replica_uid(self, replica_uid):
         """
         Force the replica uid to be set.
@@ -855,7 +891,9 @@ class CouchDatabase(CommonBackend):
         try:
             self._database.resource.put_json(
                 doc.doc_id, body=buf.getvalue(), headers=envelope.headers)
-            self._renew_couch_session()
+            # What follows is a workaround for an ugly bug. See:
+            # https://leap.se/code/issues/5448
+            self.close_connections()
         except ResourceConflict:
             raise RevisionConflict()
 
@@ -1411,7 +1449,7 @@ class CouchDatabase(CommonBackend):
         # strptime here by evaluating the conversion of an arbitrary date.
         # This will not be needed when/if we switch from python-couchdb to
         # paisley.
-        time.strptime('Mar 4 1917', '%b %d %Y')
+        time.strptime('Mar 8 1917', '%b %d %Y')
         # spawn threads to retrieve docs
         threads = []
         for doc_id in doc_ids:
@@ -1427,15 +1465,6 @@ class CouchDatabase(CommonBackend):
                 continue
             yield t._doc
 
-    def _renew_couch_session(self):
-        """
-        Create a new couch connection session.
-
-        This is a workaround for #5448. Will not be needed once bigcouch is
-        merged with couchdb.
-        """
-        self._database.resource.session = Session(timeout=COUCH_TIMEOUT)
-
 
 class CouchSyncTarget(CommonSyncTarget):
     """
@@ -1489,9 +1518,9 @@ class CouchServerState(ServerState):
         :return: The CouchDatabase object.
         :rtype: CouchDatabase
         """
-        return CouchDatabase.open_database(
-            self._couch_url + '/' + dbname,
-            create=False,
+        return CouchDatabase(
+            self._couch_url,
+            dbname,
             ensure_ddocs=False)
 
     def ensure_database(self, dbname):
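
Note that get_database() above now instantiates CouchDatabase directly rather
than going through open_database(create=False): Server.__getitem__ performs an
existence-check request against the database, which is exactly the unneeded
per-instantiation round-trip that #5386 removes. A sketch of the difference,
assuming a reachable couch server (url and database name are illustrative):

    from urlparse import urljoin
    from couchdb.client import Server, Database

    couch_url = 'http://localhost:5984'
    # eager: checks that the database exists before returning the handle
    db1 = Server(couch_url)['user-some-uuid']
    # lazy: build the handle from the joined url; requests happen on use
    db2 = Database(urljoin(couch_url, 'user-some-uuid'))
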
diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py
index 86bb4b93c708c9f922b20d308080835cfa1e1b41..17d4a5196c2b2d15c86d3ddcf5c53c8b53648dc8 100644
--- a/common/src/leap/soledad/common/tests/test_couch.py
+++ b/common/src/leap/soledad/common/tests/test_couch.py
@@ -25,9 +25,10 @@ import copy
 import shutil
 from base64 import b64decode
 from mock import Mock
+from urlparse import urljoin
 
-from couchdb.client import Server
 from u1db import errors as u1db_errors
+from couchdb.client import Server
 
 from leap.common.files import mkdir_p
 
@@ -151,8 +152,11 @@ class CouchDBTestCase(unittest.TestCase):
 class TestCouchBackendImpl(CouchDBTestCase):
 
     def test__allocate_doc_id(self):
-        db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port),
-                                 'u1db_tests', ensure_ddocs=True)
+        db = couch.CouchDatabase.open_database(
+            urljoin('http://localhost:' + str(self.wrapper.port),
+                    'u1db_tests'),
+            create=True,
+            ensure_ddocs=True)
         doc_id1 = db._allocate_doc_id()
         self.assertTrue(doc_id1.startswith('D-'))
         self.assertEqual(34, len(doc_id1))
@@ -166,28 +170,36 @@ class TestCouchBackendImpl(CouchDBTestCase):
 
 def make_couch_database_for_test(test, replica_uid):
     port = str(test.wrapper.port)
-    return couch.CouchDatabase('http://localhost:' + port, replica_uid,
-                               replica_uid=replica_uid or 'test',
-                               ensure_ddocs=True)
+    return couch.CouchDatabase.open_database(
+        urljoin('http://localhost:' + port, replica_uid),
+        create=True,
+        replica_uid=replica_uid or 'test',
+        ensure_ddocs=True)
 
 
 def copy_couch_database_for_test(test, db):
     port = str(test.wrapper.port)
     couch_url = 'http://localhost:' + port
     new_dbname = db._replica_uid + '_copy'
-    new_db = couch.CouchDatabase(couch_url,
-                                 new_dbname,
-                                 replica_uid=db._replica_uid or 'test')
+    new_db = couch.CouchDatabase.open_database(
+        urljoin(couch_url, new_dbname),
+        create=True,
+        replica_uid=db._replica_uid or 'test')
     # copy all docs
-    old_couch_db = Server(couch_url)[db._replica_uid]
-    new_couch_db = Server(couch_url)[new_dbname]
+    session = couch.Session()
+    old_couch_db = Server(couch_url, session=session)[db._replica_uid]
+    new_couch_db = Server(couch_url, session=session)[new_dbname]
     for doc_id in old_couch_db:
         doc = old_couch_db.get(doc_id)
+        # skip the u1db_config document
+        if doc_id == 'u1db_config':
+            pass
         # copy design docs
-        if ('u1db_rev' not in doc):
+        elif doc_id.startswith('_design'):
+            del doc['_rev']
             new_couch_db.save(doc)
         # copy u1db docs
-        else:
+        elif 'u1db_rev' in doc:
             new_doc = {
                 '_id': doc['_id'],
                 'u1db_transactions': doc['u1db_transactions'],
@@ -206,6 +218,8 @@ def copy_couch_database_for_test(test, db):
                 if (att is not None):
                     new_couch_db.put_attachment(new_doc, att,
                                                 filename=att_name)
+    # cleanup connections to prevent file descriptor leaking
+    session.close_connections()
     return new_db
 
 
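
The copy helper above shares one closeable couch.Session between both Server
handles so that every pooled connection can be released in a single call once
the copy finishes. The same pattern in isolation (url and database name are
illustrative):

    from couchdb.client import Server
    from leap.soledad.common import couch

    session = couch.Session()  # the closeable Session subclass from couch.py
    server = Server('http://localhost:5984', session=session)
    try:
        for doc_id in server['source-db']:
            pass  # ... read or copy documents here ...
    finally:
        # release pooled connections to avoid file descriptor leaks
        session.close_connections()
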
@@ -228,7 +242,7 @@ class CouchTests(test_backends.AllDatabaseTests, CouchDBTestCase):
     def setUp(self):
         test_backends.AllDatabaseTests.setUp(self)
         # save db info because of test_close
-        self._server = self.db._server
+        self._url = self.db._url
         self._dbname = self.db._dbname
 
     def tearDown(self):
@@ -238,7 +252,10 @@ class CouchTests(test_backends.AllDatabaseTests, CouchDBTestCase):
         if self.id() == \
                 'leap.soledad.common.tests.test_couch.CouchTests.' \
                 'test_close(couch)':
-            del(self._server[self._dbname])
+            session = couch.Session()
+            server = Server(url=self._url, session=session)
+            del(server[self._dbname])
+            session.close_connections()
         else:
             self.db.delete_database()
         test_backends.AllDatabaseTests.tearDown(self)
@@ -355,10 +372,9 @@ from u1db.backends.inmemory import InMemoryIndex
 
 class IndexedCouchDatabase(couch.CouchDatabase):
 
-    def __init__(self, url, dbname, replica_uid=None, full_commit=True,
-                     session=None, ensure_ddocs=True):
-        old_class.__init__(self, url, dbname, replica_uid, full_commit,
-                           session, ensure_ddocs=ensure_ddocs)
+    def __init__(self, url, dbname, replica_uid=None, ensure_ddocs=True):
+        old_class.__init__(self, url, dbname, replica_uid=replica_uid,
+                           ensure_ddocs=ensure_ddocs)
         self._indexes = {}
 
     def _put_doc(self, old_doc, doc):
@@ -447,19 +463,22 @@ class CouchDatabaseSyncTests(test_sync.DatabaseSyncTests, CouchDBTestCase):
         self.db1 = None
         self.db2 = None
         self.db3 = None
+        self.db1_copy = None
+        self.db2_copy = None
         test_sync.DatabaseSyncTests.setUp(self)
 
     def tearDown(self):
-        self.db and self.db.delete_database()
-        self.db1 and self.db1.delete_database()
-        self.db2 and self.db2.delete_database()
-        self.db3 and self.db3.delete_database()
-        db = self.create_database('test1_copy', 'source')
-        db.delete_database()
-        db = self.create_database('test2_copy', 'target')
-        db.delete_database()
-        db = self.create_database('test3', 'target')
-        db.delete_database()
+        for db in [self.db, self.db1, self.db2, self.db3, self.db1_copy,
+                self.db2_copy]:
+            if db is not None:
+                db.delete_database()
+                db.close()
+        for replica_uid, dbname in [('test1_copy', 'source'),
+                ('test2_copy', 'target'), ('test3', 'target')]:
+            db = self.create_database(replica_uid, dbname)
+            db.delete_database()
+            # cleanup connections to avoid leaking of file descriptors
+            db.close()
         test_sync.DatabaseSyncTests.tearDown(self)
 
 
@@ -467,12 +486,14 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
 
     def setUp(self):
         CouchDBTestCase.setUp(self)
-        self.db = couch.CouchDatabase(
-            'http://127.0.0.1:%d' % self.wrapper.port, 'test',
+        self.db = couch.CouchDatabase.open_database(
+            urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
+            create=True,
             ensure_ddocs=False)  # note that we don't enforce ddocs here
 
     def tearDown(self):
         self.db.delete_database()
+        self.db.close()
 
     def test_missing_design_doc_raises(self):
         """
@@ -509,8 +530,9 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
         Test that all methods that access design documents list functions
         will raise if the functions are not present.
         """
-        self.db = couch.CouchDatabase(
-            'http://127.0.0.1:%d' % self.wrapper.port, 'test',
+        self.db = couch.CouchDatabase.open_database(
+            urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
+            create=True,
             ensure_ddocs=True)
         # erase views from _design/transactions
         transactions = self.db._database['_design/transactions']
@@ -538,8 +560,9 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
         Test that all methods that access design documents list functions
         will raise if the functions are not present.
         """
-        self.db = couch.CouchDatabase(
-            'http://127.0.0.1:%d' % self.wrapper.port, 'test',
+        self.db = couch.CouchDatabase.open_database(
+            urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
+            create=True,
             ensure_ddocs=True)
         # erase views from _design/transactions
         transactions = self.db._database['_design/transactions']
@@ -567,8 +590,9 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
         Test that all methods that access design documents' named views will
         raise if the views are not present.
         """
-        self.db = couch.CouchDatabase(
-            'http://127.0.0.1:%d' % self.wrapper.port, 'test',
+        self.db = couch.CouchDatabase.open_database(
+            urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
+            create=True,
             ensure_ddocs=True)
         # erase views from _design/docs
         docs = self.db._database['_design/docs']
@@ -608,8 +632,9 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
         Test that all methods that access design documents will raise if the
         design docs are not present.
         """
-        self.db = couch.CouchDatabase(
-            'http://127.0.0.1:%d' % self.wrapper.port, 'test',
+        self.db = couch.CouchDatabase.open_database(
+            urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
+            create=True,
             ensure_ddocs=True)
         # delete _design/docs
         del self.db._database['_design/docs']
diff --git a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py
index 3c457cc5531abe5ae4b8ef520bcff9e81778edeb..b03f79e7c478e1de41c3c5dc2f9e8a44cf3f4983 100644
--- a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py
+++ b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py
@@ -24,6 +24,10 @@ import mock
 import tempfile
 import threading
 
+
+from urlparse import urljoin
+
+
 from leap.soledad.client import Soledad
 from leap.soledad.common.couch import CouchDatabase, CouchServerState
 from leap.soledad.common.tests.test_couch import CouchDBTestCase
@@ -101,12 +105,16 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
         TestCaseWithServer.setUp(self)
         CouchDBTestCase.setUp(self)
         self._couch_url = 'http://localhost:' + str(self.wrapper.port)
-        self.db = CouchDatabase(
-            self._couch_url, 'user-user-uuid', replica_uid='replica')
+        self.db = CouchDatabase.open_database(
+            urljoin(self._couch_url, 'user-user-uuid'),
+            create=True,
+            replica_uid='replica',
+            ensure_ddocs=True)
         self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
 
     def tearDown(self):
         self.db.delete_database()
+        self.db.close()
         CouchDBTestCase.tearDown(self)
         TestCaseWithServer.tearDown(self)
 
@@ -211,6 +219,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
 
         _create_docs_and_sync(sol, 0)
         _create_docs_and_sync(sol, 1)
+        sol.close()
 
     #
     # Concurrency tests
@@ -344,6 +353,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
             self.assertEqual(
                 1,
                 len(filter(lambda t: t[0] == doc_id, transaction_log)))
+        sol.close()
 
     def test_concurrent_syncs_do_not_fail(self):
         """
@@ -387,3 +397,4 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
             self.assertEqual(
                 1,
                 len(filter(lambda t: t[0] == doc_id, transaction_log)))
+        sol.close()
diff --git a/common/src/leap/soledad/common/tests/test_crypto.py b/common/src/leap/soledad/common/tests/test_crypto.py
index af11bc7681bd0795fd5a003f8b0ec0556f02d493..4b2470ba3ae2c7cfe322ce88b212234252543cac 100644
--- a/common/src/leap/soledad/common/tests/test_crypto.py
+++ b/common/src/leap/soledad/common/tests/test_crypto.py
@@ -44,7 +44,6 @@ from leap.soledad.common.crypto import WrongMac, UnknownMacMethod
 from leap.soledad.common.tests.u1db_tests import (
     simple_doc,
     nested_doc,
-    TestCaseWithServer,
 )
 
 
@@ -95,6 +94,7 @@ class RecoveryDocumentTestCase(BaseSoledadTest):
         self.assertEqual(self._soledad._get_storage_secret(),
                          s._get_storage_secret(),
                          'Failed setting secret for symmetric encryption.')
+        s.close()
 
 
 class SoledadSecretsTestCase(BaseSoledadTest):
@@ -110,6 +110,7 @@ class SoledadSecretsTestCase(BaseSoledadTest):
         # generate new secret
         secret_id_2 = sol._gen_secret()
         self.assertTrue(secret_id_1 != secret_id_2)
+        sol.close()
         # re-instantiate
         sol = self._soledad_instance(
             user='user@leap.se',
@@ -130,6 +131,7 @@ class SoledadSecretsTestCase(BaseSoledadTest):
         # assert id is hash of new secret
         self.assertTrue(
             secret_id_2 == hashlib.sha256(sol.storage_secret).hexdigest())
+        sol.close()
 
     def test__has_secret(self):
         sol = self._soledad_instance(
@@ -144,6 +146,7 @@ class SoledadSecretsTestCase(BaseSoledadTest):
         # but not being able to decrypt correctly should
         sol._secrets[sol.secret_id][sol.SECRET_KEY] = None
         self.assertFalse(sol._has_secret())
+        sol.close()
 
 
 class MacAuthTestCase(BaseSoledadTest):
diff --git a/common/src/leap/soledad/common/tests/test_server.py b/common/src/leap/soledad/common/tests/test_server.py
index f8d2a64f15958a18d18619d6be8ea188fdff9aeb..1c5a740762421333ef1651fa7c950b65d23aa01e 100644
--- a/common/src/leap/soledad/common/tests/test_server.py
+++ b/common/src/leap/soledad/common/tests/test_server.py
@@ -27,6 +27,7 @@ import mock
 import time
 import binascii
 
+from urlparse import urljoin
 
 from leap.common.testing.basetest import BaseLeapTest
 from leap.soledad.common.couch import (
@@ -56,7 +57,8 @@ from leap.soledad.server.auth import URLToAuthorization
 def _couch_ensure_database(self, dbname):
     db = CouchDatabase.open_database(
         self._couch_url + '/' + dbname,
-        create=True)
+        create=True,
+        ensure_ddocs=True)
     return db, db._replica_uid
 
 CouchServerState.ensure_database = _couch_ensure_database
@@ -352,11 +354,10 @@ class EncryptedSyncTestCase(
         self.assertEqual([], doclist)
         doc1 = sol1.create_doc(json.loads(simple_doc))
         # ensure remote db exists before syncing
-        db = CouchDatabase(
-            self._couch_url,
-            # the name of the user database is "user-<uuid>".
-            'user-user-uuid',
-        )
+        db = CouchDatabase.open_database(
+            urljoin(self._couch_url, 'user-user-uuid'),
+            create=True,
+            ensure_ddocs=True)
         # sync with server
         sol1._server_url = self.getURL()
         sol1.sync()
@@ -390,6 +391,9 @@ class EncryptedSyncTestCase(
         # assert incoming doc is equal to the first sent doc
         self.assertEqual(doc1, doc2)
         db.delete_database()
+        db.close()
+        sol1.close()
+        sol2.close()
 
     def test_encrypted_sym_sync_with_unicode_passphrase(self):
         """
@@ -408,11 +412,10 @@ class EncryptedSyncTestCase(
         self.assertEqual([], doclist)
         doc1 = sol1.create_doc(json.loads(simple_doc))
         # ensure remote db exists before syncing
-        db = CouchDatabase(
-            self._couch_url,
-            # the name of the user database is "user-<uuid>".
-            'user-user-uuid',
-        )
+        db = CouchDatabase.open_database(
+            urljoin(self._couch_url, 'user-user-uuid'),
+            create=True,
+            ensure_ddocs=True)
         # sync with server
         sol1._server_url = self.getURL()
         sol1.sync()
@@ -450,6 +453,9 @@ class EncryptedSyncTestCase(
         # assert incoming doc is equal to the first sent doc
         self.assertEqual(doc1, doc2)
         db.delete_database()
+        db.close()
+        sol1.close()
+        sol2.close()
 
     def test_sync_very_large_files(self):
         """
@@ -468,11 +474,10 @@ class EncryptedSyncTestCase(
         content = binascii.hexlify(os.urandom(length/2))  # len() == length
         doc1 = sol1.create_doc({'data': content})
         # ensure remote db exists before syncing
-        db = CouchDatabase(
-            self._couch_url,
-            # the name of the user database is "user-<uuid>".
-            'user-user-uuid',
-        )
+        db = CouchDatabase.open_database(
+            urljoin(self._couch_url, 'user-user-uuid'),
+            create=True,
+            ensure_ddocs=True)
         # sync with server
         sol1._server_url = self.getURL()
         sol1.sync()
@@ -493,6 +498,9 @@ class EncryptedSyncTestCase(
         self.assertEqual(doc1, doc2)
         # delete remote database
         db.delete_database()
+        db.close()
+        sol1.close()
+        sol2.close()
 
 
     def test_sync_many_small_files(self):
@@ -512,11 +520,10 @@ class EncryptedSyncTestCase(
         for i in range(0, number_of_docs):
             sol1.create_doc(json.loads(simple_doc))
         # ensure remote db exists before syncing
-        db = CouchDatabase(
-            self._couch_url,
-            # the name of the user database is "user-<uuid>".
-            'user-user-uuid',
-        )
+        db = CouchDatabase.open_database(
+            urljoin(self._couch_url, 'user-user-uuid'),
+            create=True,
+            ensure_ddocs=True)
         # sync with server
         sol1._server_url = self.getURL()
         sol1.sync()
@@ -537,6 +544,9 @@ class EncryptedSyncTestCase(
             self.assertEqual(sol1.get_doc(doc.doc_id), doc)
         # delete remote database
         db.delete_database()
+        db.close()
+        sol1.close()
+        sol2.close()
 
 class LockResourceTestCase(
         CouchDBTestCase, TestCaseWithServer):
@@ -558,8 +568,14 @@ class LockResourceTestCase(
         self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
         self._couch_url = 'http://localhost:' + str(self.wrapper.port)
         # create the databases
-        CouchDatabase(self._couch_url, 'shared')
-        CouchDatabase(self._couch_url, 'tokens')
+        CouchDatabase.open_database(
+            urljoin(self._couch_url, 'shared'),
+            create=True,
+            ensure_ddocs=True)
+        CouchDatabase.open_database(
+            urljoin(self._couch_url, 'tokens'),
+            create=True,
+            ensure_ddocs=True)
         self._state = CouchServerState(
             self._couch_url, 'shared', 'tokens')
 
@@ -567,10 +583,10 @@ class LockResourceTestCase(
         CouchDBTestCase.tearDown(self)
         TestCaseWithServer.tearDown(self)
         # delete remote database
-        db = CouchDatabase(
-            self._couch_url,
-            'shared',
-        )
+        db = CouchDatabase.open_database(
+            urljoin(self._couch_url, 'shared'),
+            create=True,
+            ensure_ddocs=True)
         db.delete_database()
 
     def test__try_obtain_filesystem_lock(self):
diff --git a/common/src/leap/soledad/common/tests/test_soledad.py b/common/src/leap/soledad/common/tests/test_soledad.py
index 035c5ac5e1d1ef5b191235b846fad56970e84ba8..5a3bf2b01bbdfb2d7abd5d1d86f6890080ce7bc8 100644
--- a/common/src/leap/soledad/common/tests/test_soledad.py
+++ b/common/src/leap/soledad/common/tests/test_soledad.py
@@ -49,6 +49,7 @@ class AuxMethodsTestCase(BaseSoledadTest):
         secrets_path = os.path.dirname(sol.secrets_path)
         self.assertTrue(os.path.isdir(local_db_dir))
         self.assertTrue(os.path.isdir(secrets_path))
+        sol.close()
 
     def test__init_db(self):
         sol = self._soledad_instance()
@@ -61,6 +62,7 @@ class AuxMethodsTestCase(BaseSoledadTest):
         sol._init_db()
         from leap.soledad.client.sqlcipher import SQLCipherDatabase
         self.assertIsInstance(sol._db, SQLCipherDatabase)
+        sol.close()
 
     def test__init_config_defaults(self):
         """
@@ -103,6 +105,7 @@ class AuxMethodsTestCase(BaseSoledadTest):
             os.path.join(self.tempdir, 'value_2'),
             sol.local_db_path)
         self.assertEqual('value_1', sol.server_url)
+        sol.close()
 
     def test_change_passphrase(self):
         """
@@ -118,6 +121,7 @@ class AuxMethodsTestCase(BaseSoledadTest):
 
         # change the passphrase
         sol.change_passphrase(u'654321')
+        sol.close()
 
         self.assertRaises(
             WrongMac,
@@ -132,6 +136,7 @@ class AuxMethodsTestCase(BaseSoledadTest):
             prefix=self.rand_prefix)
         doc2 = sol2.get_doc(doc_id)
         self.assertEqual(doc, doc2)
+        sol2.close()
 
     def test_change_passphrase_with_short_passphrase_raises(self):
         """
@@ -145,6 +150,7 @@ class AuxMethodsTestCase(BaseSoledadTest):
         self.assertRaises(
             PassphraseTooShort,
             sol.change_passphrase, u'54321')
+        sol.close()
 
     def test_get_passphrase(self):
         """
@@ -152,6 +158,7 @@ class AuxMethodsTestCase(BaseSoledadTest):
         """
         sol = self._soledad_instance()
         self.assertEqual('123', sol.passphrase)
+        sol.close()
 
 
 class SoledadSharedDBTestCase(BaseSoledadTest):
@@ -165,6 +172,9 @@ class SoledadSharedDBTestCase(BaseSoledadTest):
             'https://provider/', ADDRESS, document_factory=SoledadDocument,
             creds=None)
 
+    def tearDown(self):
+        BaseSoledadTest.tearDown(self)
+
     def test__get_secrets_from_shared_db(self):
         """
         Ensure the shared db is queried with the correct doc_id.
@@ -209,7 +219,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
         BaseSoledadTest.setUp(self)
 
     def tearDown(self):
-        pass
+        BaseSoledadTest.tearDown(self)
 
     def _pop_mock_call(self, mocked):
         mocked.call_args_list.pop()
@@ -283,6 +293,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
         # assert db was locked and unlocked
         sol._shared_db.lock.assert_called_with()
         sol._shared_db.unlock.assert_called_with('atoken')
+        sol.close()
 
     def test_stage2_bootstrap_signals(self):
         """
@@ -305,6 +316,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
             def __call__(self):
                 return self
 
+        sol.close()
         # reset mock
         soledad.client.signal.reset_mock()
         # get a fresh instance so it emits all bootstrap signals
@@ -328,6 +340,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
             proto.SOLEDAD_DONE_DOWNLOADING_KEYS,
             ADDRESS,
         )
+        sol.close()
 
     def test_stage1_bootstrap_signals(self):
         """
@@ -337,6 +350,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
         # get an existent instance so it emits only some of bootstrap signals
         sol = self._soledad_instance()
         self.assertEqual([], soledad.client.signal.mock_calls)
+        sol.close()
 
     def test_sync_signals(self):
         """
@@ -355,6 +369,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
             proto.SOLEDAD_DONE_DATA_SYNC,
             ADDRESS,
         )
+        sol.close()
 
     def test_need_sync_signals(self):
         """
@@ -375,3 +390,4 @@ class SoledadSignalingTestCase(BaseSoledadTest):
             ADDRESS,
         )
         SoledadSyncTarget.get_sync_info = old_get_sync_info
+        sol.close()
diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher.py b/common/src/leap/soledad/common/tests/test_sqlcipher.py
index 66a673b6375f5a1baae38fa49d551655a098f7a2..c79a6045ef0179a483b2ab2d415f763fad0a1fc6 100644
--- a/common/src/leap/soledad/common/tests/test_sqlcipher.py
+++ b/common/src/leap/soledad/common/tests/test_sqlcipher.py
@@ -469,11 +469,9 @@ class SQLCipherDatabaseSyncTests(
 
     def setUp(self):
         test_sync.DatabaseSyncTests.setUp(self)
-        BaseSoledadTest.setUp(self)
 
     def tearDown(self):
         test_sync.DatabaseSyncTests.tearDown(self)
-        BaseSoledadTest.tearDown(self)
 
     def test_sync_autoresolves(self):
         """
@@ -683,11 +681,9 @@ class SQLCipherSyncTargetTests(
 
     def setUp(self):
         test_sync.DatabaseSyncTargetTests.setUp(self)
-        #BaseSoledadTest.setUp(self)
 
     def tearDown(self):
         test_sync.DatabaseSyncTargetTests.tearDown(self)
-        BaseSoledadTest.tearDown(self)
 
     def test_sync_exchange(self):
         """
diff --git a/common/src/leap/soledad/common/tests/test_target.py b/common/src/leap/soledad/common/tests/test_target.py
index 5a5417450c00459d576faeb1cc0ec81577a4e47e..c1e00d527fb57a835171e7108902b6c0ae5b452f 100644
--- a/common/src/leap/soledad/common/tests/test_target.py
+++ b/common/src/leap/soledad/common/tests/test_target.py
@@ -272,11 +272,9 @@ class TestSoledadParsingSyncStream(
 
     def setUp(self):
         test_remote_sync_target.TestParsingSyncStream.setUp(self)
-        BaseSoledadTest.setUp(self)
 
     def tearDown(self):
         test_remote_sync_target.TestParsingSyncStream.tearDown(self)
-        BaseSoledadTest.tearDown(self)
 
     def test_extra_comma(self):
         """
@@ -646,6 +644,7 @@ class SoledadDatabaseSyncTargetTests(
         self.assertEqual(([], 1, last_trans_id),
                          (self.other_changes, new_gen, last_trans_id))
         self.assertEqual(10, self.st.get_sync_info('replica')[3])
+        sol.close()
 
     def test_sync_exchange_push_many(self):
         """
diff --git a/common/src/leap/soledad/common/tests/u1db_tests/__init__.py b/common/src/leap/soledad/common/tests/u1db_tests/__init__.py
index 3bc12487513164d0e92b3eac984d68fbe5eafe3e..99ff77b477c50e18526eaa13b23eab015243142b 100644
--- a/common/src/leap/soledad/common/tests/u1db_tests/__init__.py
+++ b/common/src/leap/soledad/common/tests/u1db_tests/__init__.py
@@ -208,8 +208,8 @@ class DatabaseBaseTests(TestCase):
         self.db = self.create_database('test')
 
     def tearDown(self):
-        # TODO: Add close_database parameterization
-        # self.close_database(self.db)
+        if hasattr(self, 'db') and self.db is not None:
+            self.db.close()
         super(DatabaseBaseTests, self).tearDown()
 
     def assertTransactionLog(self, doc_ids, db):
@@ -335,6 +335,13 @@ class TestCaseWithServer(TestCase):
         super(TestCaseWithServer, self).setUp()
         self.server = self.server_thread = None
 
+    def tearDown(self):
+        if self.server is not None:
+            self.server.shutdown()
+            self.server_thread.join()
+            self.server.server_close()
+        super(TestCaseWithServer, self).tearDown()
+
     @property
     def url_scheme(self):
         return self.server_def()[-1]
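
The tearDown() added above stops the test server in the safe order for
SocketServer-based servers: shutdown() ends the serve_forever() loop, join()
waits until the serving thread has actually exited, and server_close()
releases the listening socket. A minimal standalone sketch of that lifecycle
(plain wsgiref rather than the project's test server):

    import threading
    from wsgiref.simple_server import make_server

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['ok']

    server = make_server('localhost', 0, app)
    thread = threading.Thread(target=server.serve_forever)
    thread.start()
    # ... exercise the server ...
    server.shutdown()      # stop the serve_forever() loop
    thread.join()          # wait for the serving thread to exit
    server.server_close()  # release the listening socket
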
diff --git a/common/src/leap/soledad/common/tests/u1db_tests/test_backends.py b/common/src/leap/soledad/common/tests/u1db_tests/test_backends.py
index d2a91d11c5a7894c67880f10c5d5189c499e167e..c0a7e1f7ff3ddae7826e8c600f855911438df44d 100644
--- a/common/src/leap/soledad/common/tests/u1db_tests/test_backends.py
+++ b/common/src/leap/soledad/common/tests/u1db_tests/test_backends.py
@@ -355,6 +355,9 @@ class LocalDatabaseTests(tests.DatabaseBaseTests):
 
     scenarios = tests.LOCAL_DATABASES_SCENARIOS
 
+    def setUp(self):
+        tests.DatabaseBaseTests.setUp(self)
+
     def test_create_doc_different_ids_diff_db(self):
         doc1 = self.db.create_doc_from_json(simple_doc)
         db2 = self.create_database('other-uid')
diff --git a/common/src/leap/soledad/common/tests/u1db_tests/test_sync.py b/common/src/leap/soledad/common/tests/u1db_tests/test_sync.py
index 1f78f9126274fc2914364818ee39ad6fc5688503..a37c36db1e3da026191ee8a4780ab433b640b646 100644
--- a/common/src/leap/soledad/common/tests/u1db_tests/test_sync.py
+++ b/common/src/leap/soledad/common/tests/u1db_tests/test_sync.py
@@ -85,7 +85,6 @@ class DatabaseSyncTargetTests(tests.DatabaseBaseTests,
     whitebox = True
 
     def setUp(self):
-        tests.DatabaseBaseTests.setUp(self)
         tests.TestCaseWithServer.setUp(self)
         self.db, self.st = self.create_db_and_target(self)
         self.other_changes = []
@@ -94,7 +93,6 @@ class DatabaseSyncTargetTests(tests.DatabaseBaseTests,
         # We delete them explicitly, so that connections are cleanly closed
         del self.st
         self.db.close()
-        del self.db
         super(DatabaseSyncTargetTests, self).tearDown()
 
     def create_db_and_target(self, *args):
@@ -1013,30 +1011,30 @@ class DatabaseSyncTests(tests.DatabaseBaseTests,
     def test_sync_supersedes_conflicts(self):
         self.db1 = self.create_database('test1', 'both')
         self.db2 = self.create_database('test2', 'target')
-        db3 = self.create_database('test3', 'both')
+        self.db3 = self.create_database('test3', 'both')
         doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc')
         self.db2.create_doc_from_json('{"b": 1}', doc_id='the-doc')
-        db3.create_doc_from_json('{"c": 1}', doc_id='the-doc')
-        self.sync(db3, self.db1)
+        self.db3.create_doc_from_json('{"c": 1}', doc_id='the-doc')
+        self.sync(self.db3, self.db1)
         self.assertEqual(
             self.db1._get_generation_info(),
-            db3._get_replica_gen_and_trans_id(self.db1._replica_uid))
+            self.db3._get_replica_gen_and_trans_id(self.db1._replica_uid))
         self.assertEqual(
-            db3._get_generation_info(),
-            self.db1._get_replica_gen_and_trans_id(db3._replica_uid))
-        self.sync(db3, self.db2)
+            self.db3._get_generation_info(),
+            self.db1._get_replica_gen_and_trans_id(self.db3._replica_uid))
+        self.sync(self.db3, self.db2)
         self.assertEqual(
             self.db2._get_generation_info(),
-            db3._get_replica_gen_and_trans_id(self.db2._replica_uid))
+            self.db3._get_replica_gen_and_trans_id(self.db2._replica_uid))
         self.assertEqual(
-            db3._get_generation_info(),
-            self.db2._get_replica_gen_and_trans_id(db3._replica_uid))
-        self.assertEqual(3, len(db3.get_doc_conflicts('the-doc')))
+            self.db3._get_generation_info(),
+            self.db2._get_replica_gen_and_trans_id(self.db3._replica_uid))
+        self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc')))
         doc1.set_json('{"a": 2}')
         self.db1.put_doc(doc1)
-        self.sync(db3, self.db1)
+        self.sync(self.db3, self.db1)
         # original doc1 should have been removed from conflicts
-        self.assertEqual(3, len(db3.get_doc_conflicts('the-doc')))
+        self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc')))
 
     def test_sync_stops_after_get_sync_info(self):
         self.db1 = self.create_database('test1', 'source')
@@ -1054,70 +1052,70 @@ class DatabaseSyncTests(tests.DatabaseBaseTests,
         self.db2 = self.create_database('test2', 'target')
         self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
         self.sync(self.db1, self.db2)
-        db1_copy = self.copy_database(self.db1)
+        self.db1_copy = self.copy_database(self.db1)
         self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
         self.sync(self.db1, self.db2)
         self.assertRaises(
-            errors.InvalidGeneration, self.sync, db1_copy, self.db2)
+            errors.InvalidGeneration, self.sync, self.db1_copy, self.db2)
 
     def test_sync_detects_rollback_in_target(self):
         self.db1 = self.create_database('test1', 'source')
         self.db2 = self.create_database('test2', 'target')
         self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
         self.sync(self.db1, self.db2)
-        db2_copy = self.copy_database(self.db2)
+        self.db2_copy = self.copy_database(self.db2)
         self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
         self.sync(self.db1, self.db2)
         self.assertRaises(
-            errors.InvalidGeneration, self.sync, self.db1, db2_copy)
+            errors.InvalidGeneration, self.sync, self.db1, self.db2_copy)
 
     def test_sync_detects_diverged_source(self):
         self.db1 = self.create_database('test1', 'source')
         self.db2 = self.create_database('test2', 'target')
-        db3 = self.copy_database(self.db1)
+        self.db3 = self.copy_database(self.db1)
         self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
-        db3.create_doc_from_json(tests.simple_doc, doc_id="divergent")
+        self.db3.create_doc_from_json(tests.simple_doc, doc_id="divergent")
         self.sync(self.db1, self.db2)
         self.assertRaises(
-            errors.InvalidTransactionId, self.sync, db3, self.db2)
+            errors.InvalidTransactionId, self.sync, self.db3, self.db2)
 
     def test_sync_detects_diverged_target(self):
         self.db1 = self.create_database('test1', 'source')
         self.db2 = self.create_database('test2', 'target')
-        db3 = self.copy_database(self.db2)
-        db3.create_doc_from_json(tests.nested_doc, doc_id="divergent")
+        self.db3 = self.copy_database(self.db2)
+        self.db3.create_doc_from_json(tests.nested_doc, doc_id="divergent")
         self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
         self.sync(self.db1, self.db2)
         self.assertRaises(
-            errors.InvalidTransactionId, self.sync, self.db1, db3)
+            errors.InvalidTransactionId, self.sync, self.db1, self.db3)
 
     def test_sync_detects_rollback_and_divergence_in_source(self):
         self.db1 = self.create_database('test1', 'source')
         self.db2 = self.create_database('test2', 'target')
         self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
         self.sync(self.db1, self.db2)
-        db1_copy = self.copy_database(self.db1)
+        self.db1_copy = self.copy_database(self.db1)
         self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
         self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc3')
         self.sync(self.db1, self.db2)
-        db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
-        db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
+        self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
+        self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
         self.assertRaises(
-            errors.InvalidTransactionId, self.sync, db1_copy, self.db2)
+            errors.InvalidTransactionId, self.sync, self.db1_copy, self.db2)
 
     def test_sync_detects_rollback_and_divergence_in_target(self):
         self.db1 = self.create_database('test1', 'source')
         self.db2 = self.create_database('test2', 'target')
         self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
         self.sync(self.db1, self.db2)
-        db2_copy = self.copy_database(self.db2)
+        self.db2_copy = self.copy_database(self.db2)
         self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
         self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc3')
         self.sync(self.db1, self.db2)
-        db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
-        db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
+        self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
+        self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
         self.assertRaises(
-            errors.InvalidTransactionId, self.sync, self.db1, db2_copy)
+            errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy)
 
 
 class TestDbSync(tests.TestCaseWithServer):
diff --git a/scripts/compile_design_docs.py b/scripts/compile_design_docs.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ffebb1014c462f0fa4ddd95e8f2af4327e22e3b
--- /dev/null
+++ b/scripts/compile_design_docs.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+
+
+# This script builds files for the design documents represented in the
+# ../common/src/leap/soledad/common/ddocs directory structure (relative to the
+# current location of the script) into a target directory.
+
+
+import argparse
+from os import listdir
+from os.path import realpath, dirname, isdir, join, isfile, basename
+import json
+
+DDOCS_REL_PATH = ('..', 'common', 'src', 'leap', 'soledad', 'common', 'ddocs')
+
+
+def build_ddocs():
+    """
+    Build design documents.
+
+    For ease of development, couch backend design documents are stored as
+    `.js` files in  subdirectories of
+    `../common/src/leap/soledad/common/ddocs`. This function scans that
+    directory for javascript files, and builds the design documents structure.
+
+    This function uses the following conventions to generate design documents:
+
+      - Design documents are represented by directories in the form
+        `<prefix>/<ddoc>`, where prefix is the `src/leap/soledad/common/ddocs`
+        directory.
+      - Design document directories might contain `views`, `lists` and
+        `updates` subdirectories.
+      - Views subdirectories must contain a `map.js` file and may contain a
+        `reduce.js` file.
+      - List and updates subdirectories may contain any number of javascript
+        files (i.e. ending in `.js`) whose names will be mapped to the
+        corresponding list or update function name.
+    """
+    ddocs = {}
+
+    # design docs are represented by subdirectories of `DDOCS_REL_PATH`
+    cur_pwd = dirname(realpath(__file__))
+    ddocs_path = join(cur_pwd, *DDOCS_REL_PATH)
+    for ddoc in [f for f in listdir(ddocs_path)
+                 if isdir(join(ddocs_path, f))]:
+
+        ddocs[ddoc] = {'_id': '_design/%s' % ddoc}
+
+        for t in ['views', 'lists', 'updates']:
+            tdir = join(ddocs_path, ddoc, t)
+            if isdir(tdir):
+
+                ddocs[ddoc][t] = {}
+
+                if t == 'views':  # handle views (with map/reduce functions)
+                    for view in [f for f in listdir(tdir)
+                                 if isdir(join(tdir, f))]:
+                        # look for map.js and reduce.js
+                        mapfile = join(tdir, view, 'map.js')
+                        reducefile = join(tdir, view, 'reduce.js')
+                        mapfun = None
+                        reducefun = None
+                        try:
+                            with open(mapfile) as f:
+                                mapfun = f.read()
+                        except IOError:
+                            pass
+                        try:
+                            with open(reducefile) as f:
+                                reducefun = f.read()
+                        except IOError:
+                            pass
+                        ddocs[ddoc]['views'][view] = {}
+
+                        if mapfun is not None:
+                            ddocs[ddoc]['views'][view]['map'] = mapfun
+                        if reducefun is not None:
+                            ddocs[ddoc]['views'][view]['reduce'] = reducefun
+
+                else:  # handle lists, updates, etc
+                    for fun in [f for f in listdir(tdir)
+                                if isfile(join(tdir, f))]:
+                        funfile = join(tdir, fun)
+                        funname = basename(funfile).replace('.js', '')
+                        try:
+                            with open(funfile) as f:
+                                ddocs[ddoc][t][funname] = f.read()
+                        except IOError:
+                            pass
+    return ddocs
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        'target', type=str,
+        help='the target dir where to store design documents')
+    args = parser.parse_args()
+
+    # check if given target is a directory
+    if not isdir(args.target):
+        print 'Error: %s is not a directory.' % args.target
+        exit(1)
+
+    # write design doc files
+    ddocs = build_ddocs()
+    for ddoc in ddocs:
+        ddoc_filename = "%s.json" % ddoc
+        with open(join(args.target, ddoc_filename), 'w') as f:
+            f.write("%s" % json.dumps(ddocs[ddoc], indent=3))
+        print "Wrote _design/%s content in %s" % (
+            ddoc, join(args.target, ddoc_filename))
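
Under the conventions documented in build_ddocs(), a hypothetical layout such
as ddocs/docs/views/get/map.js plus ddocs/docs/updates/put.js would compile
to a structure along these lines (contents abbreviated, names illustrative):

    ddocs = {
        'docs': {
            '_id': '_design/docs',
            'views': {
                'get': {
                    'map': '<contents of views/get/map.js>',
                    # 'reduce' appears only if views/get/reduce.js exists
                },
            },
            'updates': {
                'put': '<contents of updates/put.js>',
            },
        },
    }
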
diff --git a/scripts/update_design_docs.py b/scripts/ddocs/update_design_docs.py
similarity index 100%
rename from scripts/update_design_docs.py
rename to scripts/ddocs/update_design_docs.py
diff --git a/scripts/backends_cpu_usage/log_cpu_usage.py b/scripts/profiling/backends_cpu_usage/log_cpu_usage.py
similarity index 100%
rename from scripts/backends_cpu_usage/log_cpu_usage.py
rename to scripts/profiling/backends_cpu_usage/log_cpu_usage.py
diff --git a/scripts/backends_cpu_usage/movingaverage.py b/scripts/profiling/backends_cpu_usage/movingaverage.py
similarity index 100%
rename from scripts/backends_cpu_usage/movingaverage.py
rename to scripts/profiling/backends_cpu_usage/movingaverage.py
diff --git a/scripts/backends_cpu_usage/plot.py b/scripts/profiling/backends_cpu_usage/plot.py
similarity index 100%
rename from scripts/backends_cpu_usage/plot.py
rename to scripts/profiling/backends_cpu_usage/plot.py
diff --git a/scripts/backends_cpu_usage/test_u1db_sync.py b/scripts/profiling/backends_cpu_usage/test_u1db_sync.py
similarity index 100%
rename from scripts/backends_cpu_usage/test_u1db_sync.py
rename to scripts/profiling/backends_cpu_usage/test_u1db_sync.py
diff --git a/scripts/doc_put_memory_usage/find_max_upload_size.py b/scripts/profiling/doc_put_memory_usage/find_max_upload_size.py
similarity index 100%
rename from scripts/doc_put_memory_usage/find_max_upload_size.py
rename to scripts/profiling/doc_put_memory_usage/find_max_upload_size.py
diff --git a/scripts/doc_put_memory_usage/get-mem.py b/scripts/profiling/doc_put_memory_usage/get-mem.py
similarity index 100%
rename from scripts/doc_put_memory_usage/get-mem.py
rename to scripts/profiling/doc_put_memory_usage/get-mem.py
diff --git a/scripts/doc_put_memory_usage/plot-mem.py b/scripts/profiling/doc_put_memory_usage/plot-mem.py
similarity index 100%
rename from scripts/doc_put_memory_usage/plot-mem.py
rename to scripts/profiling/doc_put_memory_usage/plot-mem.py