diff --git a/AUTHORS b/AUTHORS
index be4e7e6545e1c93acdb6798d5be43a21c0ae55fb..8c1d1c8430c6c27102613a72bd210e53a68cde0c 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -30,7 +30,8 @@ Matthew Palmer <mpalmer@debian.org> -- halt loglevel feature
 dan@garthwaite.org -- reportspace bugfix
 Tuomas Jormola <tj@solitudo.net> -- "when = manual" option
 Ian Beckwith <ianb@erislabs.net> -- dup bandwidthlimit fix
-Olivier Berger <oberger@ouvaton.org> -- dup debug output bugfix, reportinfo option
+Olivier Berger <oberger@ouvaton.org> -- much work on the dup handler
 stefan <s.freudenberg@jpberlin.de> -- dup support for Amazon S3 buckets
 maniacmartin <martin@maniacmartin.com> -- rdiff confusing error message fix
 Chris Nolan <chris@cenolan.com> -- maildir subdirectory expansion
+Dan Carley -- mysql bugfix
diff --git a/ChangeLog b/ChangeLog
index 4be58baa181002eead2ba042b7c1432678706a5e..dd8f605969c9926e6c7385c1bef44bdf06d46143 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,39 @@
+version 0.9.8 -- UNRELEASED
+    backupninja changes
+	 . Added GZIP_OPTS option, defaulting to --rsyncable, so that this
+	   option can be disabled on systems that don't support it. This
+	   also makes it possible to use another compression program, such
+	   as pbzip2 on SMP machines (Closes Roundup bug #2405)
+    handler changes
+	sys:
+	 . Only run mdadm if RAID devices actually exist (Closes: #572450)
+	dup:
+	 . Now defaults to using --full-if-older-than; see the new "increments"
+	   option to opt out of or tweak the default (30D) delay between full
+	   backups. Thanks a lot to Olivier Berger (Closes: #535996)
+	 . Use duplicity's --extra-clean option to get rid of unnecessary old
+	   cache files when cleaning up. This is enabled when using duplicity
+	   0.6.01 or newer, which depends on local caching (Closes: #572721)
+	 . Ignore anything but digits and "." when comparing versions
+	   (Closes: #578987)
+	 . Put archive directory (cache) into /var/cache/backupninja/duplicity
+	   rather than the default /root/.cache/duplicity, unless the user
+	   has specified it (Closes: #580213)
+	 . Better example.dup documentation. Thanks, Alster!
+	 . Added ftp_password option to securely transmit the FTP password
+	   from backupninja to duplicity.
+	mysql:
+	 . Don't lock tables in the information_schema database
+	   (Closes: #587011)
+	 . Fix code logic to make dbusername/dbpassword actually usable
+	   (Closes Redmine bug #2264)
+    doc changes
+	manpage:
+	 . Fix typo in manpage (Closes: #583778)
+	ldap:
+	 . Add ssl option description, fix tls option description (Closes
+	   Roundup bug #2407)
+
 version 0.9.7 -- January 27, 2010
     backupninja changes
 	 . fix bug in reportspace, thanks Dan Garthwaite
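Note on the GZIP_OPTS entry above: the option is read from
/etc/backupninja.conf. A minimal sketch for switching compression to
pbzip2 on an SMP machine (pbzip2 is assumed to live in /usr/bin; it does
not understand gzip's --rsyncable, so the flag is cleared):

    GZIP=/usr/bin/pbzip2
    GZIP_OPTS=
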
diff --git a/NEWS b/NEWS
index ef8592eedceb28ebb64af9f8963ea029994f4415..9867b54358e0bee6578a56fd5be370b14484a1e6 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,23 @@
+backupninja (0.9.8-1) UNRELEASED
+
+	* duplicity 0.6.01 and later default to using an archive (cache)
+	  directory, which was previously opt-in. Starting with backupninja
+	  0.9.8, the backupninja duplicity handler puts this cache into
+	  /var/cache/backupninja/duplicity unless the user specifies another
+	  location with the "options" setting in the *.dup job.
+	  When backups have been performed with backupninja older than 0.9.8 in
+	  conjunction with duplicity 0.6.01 or later, e.g. when using Sid or
+	  Squeeze at certain times of the Squeeze release cycle, cache files
+	  were probably saved into /root/.cache/duplicity; one may want to
+	  delete these files, or rather save bandwidth and just move the cache
+	  directory to the new location:
+
+	             mkdir -p /var/cache/backupninja
+	             mv /root/.cache/duplicity /var/cache/backupninja/
+
+	  It is probably desirable to exclude this cache directory from
+	  duplicity backup sets, to avoid backing up the backup cache itself.
+
 backupninja (0.9.7-1) UNRELEASED
 
 	* mysql: output filenames to support shell meta-characters in
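To exclude the new cache location from a duplicity backup set, as the
NEWS entry above suggests, an exclude line can be added to the relevant
.dup job; this is exactly what the updated example files below do:

    exclude = /var/cache/backupninja/duplicity
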
diff --git a/etc/backupninja.conf.in b/etc/backupninja.conf.in
index de1fbf3b553b92a9ff2634f97667a2e79c679c69..dee9fff6b0376305da03784500f47be55de0a8b7 100644
--- a/etc/backupninja.conf.in
+++ b/etc/backupninja.conf.in
@@ -86,6 +86,7 @@ vservers = no
 # PGSQLDUMP=/usr/bin/pg_dump
 # PGSQLDUMPALL=/usr/bin/pg_dumpall
 # GZIP=/bin/gzip
+# GZIP_OPTS='--rsyncable'
 # RSYNC=/usr/bin/rsync
 # VSERVERINFO=/usr/sbin/vserver-info
 # VSERVER=/usr/sbin/vserver
diff --git a/examples/example.dup b/examples/example.dup
index ea4d66e468e87228827efc5a9169d787eddfedfd..0ed5b2a87f2f35438806f61fcb7a3a8b122492b6 100644
--- a/examples/example.dup
+++ b/examples/example.dup
@@ -19,7 +19,8 @@
 ## Default:
 # nicelevel = 0
 
-## test the connection? set to no to skip the test if the remote host is alive
+## test the connection? set to no to skip testing whether the remote host is alive.
+## if 'desturl' is set below, 'testconnect' must be set to 'no' for now.
 ##
 ## Default:
 # testconnect = yes
@@ -134,6 +135,7 @@ include = /var/lib/dpkg/status-old
 
 # files to exclude from the backup
 exclude = /home/*/.gnupg
+exclude = /var/cache/backupninja/duplicity
 
 ######################################################
 ## destination section
@@ -147,6 +149,14 @@ exclude = /home/*/.gnupg
 ## Default: 
 # incremental = yes
 
+## how many days of incremental backups before doing a full backup again ;
+## default is 30 days (one can also use the time format of duplicity).
+## if increments = keep, never automatically perform a new full backup ;
+## only perform incremental backups.
+##
+## Default:
+# increments = 30
+
 ## how many days of data to keep ; default is 60 days.
 ## (you can also use the time format of duplicity)
 ## 'keep = yes' means : do not delete old data, the remote host will take care of this
@@ -161,6 +171,7 @@ exclude = /home/*/.gnupg
 ## desturl = file:///usr/local/backup
 ## desturl = rsync://user@other.host//var/backup/bla
 ## desturl = s3+http://
+## desturl = ftp://myftpuser@ftp.example.org/remote/ftp/path
 ## the default value of this configuration option is not set:
 ##
 ## Default:
@@ -175,8 +186,14 @@ exclude = /home/*/.gnupg
 # awsaccesskeyid = 
 # awssecretaccesskey = 
 
-## bandwith limit, in kbit/s ; default is 0, i.e. no limit an example
-## setting would be:
+## FTP password, needed for backups using desturl = ftp://...
+##
+## Default:
+# ftp_password = 
+
+## bandwidth limit, in kbit/s ; default is 0, i.e. no limit
+## if using 'desturl' above, 'bandwidthlimit' must not be set
+## an example setting of 128 kbit/s would be:
 ## bandwidthlimit = 128
 ##
 ## Default:
@@ -191,14 +208,18 @@ exclude = /home/*/.gnupg
 ## Default:
 # sshoptions = 
 
-## put the backups under this directory, this must be set!
+## put the backups under this destination directory
+## if using 'desturl' above, this must not be set
+## in all other cases, this must be set!
 ## an example setting would be:
 ## destdir = /backups
 ## 
 ## Default:
 # destdir = 
 
-## the machine which will receive the backups, this must be set!
+## the machine which will receive the backups
+## if using 'desturl' above, this must not be set
+## in all other cases, this must be set!
 ## an example setting would be:
 ## desthost = backuphost
 ## 
@@ -206,12 +227,11 @@ exclude = /home/*/.gnupg
 # desthost = 
 
 ## make the files owned by this user
-## note: you must be able to ssh backupuser@backhost
-## without specifying a password (if type = remote).
+## if using 'desturl' above, this must not be set
+## note: if using an SSH-based transport and 'type' is set to 'remote', you must
+##       be able to 'ssh backupuser@backuphost' without specifying a password.
 ## an example setting would be:
 ## destuser = backupuser
 ##
 ## Default:
 # destuser =
-
-
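Taken together, the desturl and ftp_password additions above let a .dup
job back up over FTP. A minimal destination sketch (myftpuser,
ftp.example.org and the remote path are placeholders; 'testconnect' has
to be disabled, and destdir/desthost/destuser left unset, when 'desturl'
is used):

    testconnect = no

    [dest]
    desturl = ftp://myftpuser@ftp.example.org/remote/ftp/path
    ftp_password = mysecret
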
diff --git a/examples/example.ldap b/examples/example.ldap
index ee7c57d42f438fe8764aea93daa1e4dfec0bbb85..174ed1d405aa37aa14abf65c15f7a48a41e65d3c 100644
--- a/examples/example.ldap
+++ b/examples/example.ldap
@@ -46,6 +46,11 @@
 ## ldaphost (no default): set this to your ldap host if it is not local
 # ldaphost =
 
-## tls (default yes): if set to 'yes' then TLS connection will be
-## attempted to your ldaphost by using the URI base ldaps: otherwise ldap: will be used
-# tls = yes
\ No newline at end of file
+## ssl (default yes): if set to 'yes' then an SSL connection to your
+## ldaphost will be attempted, using the ldaps:// URI scheme
+# ssl = yes
+
+## tls (default no): if set to 'yes' then a TLS connection to your
+## ldaphost will be attempted, using the StartTLS extended operation
+## (RFC 2246, RFC 2830)
+# tls = no
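In short, ssl secures the whole session on the ldaps port, while tls
upgrades a plain ldap:// connection once it is established. A sketch for
reaching a remote server via StartTLS (ldap.example.org is a
placeholder, and the handler is assumed to honour ssl = no):

    ldaphost = ldap.example.org
    ssl = no
    tls = yes
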
diff --git a/examples/example.rdiff b/examples/example.rdiff
index 08e886965a109399fb205889e64c9902a5ab9cd7..5adecd8e92dd4baaaa35697b42ae95e030476d36 100644
--- a/examples/example.rdiff
+++ b/examples/example.rdiff
@@ -122,6 +122,7 @@ include = /var/lib/dpkg/status-old
 
 ## files to exclude from the backup
 exclude = /home/*/.gnupg
+exclude = /var/cache/backupninja/duplicity
 
 ######################################################
 ## destination section
diff --git a/handlers/dup.helper.in b/handlers/dup.helper.in
index 8b344dcf81759b42e21e881b66a4e35359d4594b..e985c5e7bd097646efe8a35d26c6eee935989659 100644
--- a/handlers/dup.helper.in
+++ b/handlers/dup.helper.in
@@ -123,6 +123,7 @@ do_dup_dest() {
         formItem "destdir" "$dup_destdir"
         formItem "keep" "$dup_keep"
         formItem "incremental" "$dup_incremental"
+        formItem "increments" "$dup_increments"
         formItem "bandwidthlimit" "$dup_bandwidth"
         formItem "sshoptions" "$dup_sshoptions"
       formDisplay
@@ -139,8 +140,9 @@ do_dup_dest() {
       dup_destdir=${thereply[2]}
       dup_keep=${thereply[3]}
       dup_incremental=${thereply[4]}
-      dup_bandwidth=${thereply[5]}
-      dup_sshoptions=${thereply[6]}
+      dup_increments=${thereply[5]}
+      dup_bandwidth=${thereply[6]}
+      dup_sshoptions=${thereply[7]}
 
    done
    set +o noglob
@@ -278,7 +280,8 @@ options = $dup_options
 # default is 0, but set to 19 if you want to lower the priority.
 nicelevel = $dup_nicelevel
 
-# default is yes. set to no to skip the test if the remote host is alive
+# default is yes. set to no to skip testing whether the remote host is alive.
+# if 'desturl' is set below, 'testconnect' must be set to 'no' for now.
 testconnect = $dup_testconnect
 
 ######################################################
@@ -406,6 +409,14 @@ EOF
 # if incremental = no, perform a full backup in order to start a new backup set
 incremental = $dup_incremental
 
+# how many days of incremental backups before doing a full backup again ;
+# default is 30 days (one can also use the time format of duplicity).
+# if increments = keep, never automatically perform a new full backup ;
+# only perform incremental backups.
+#increments = 30
+#increments = keep
+increments = $dup_increments
+
 # how many days of data to keep ; default is 60 days.
 # (you can also use the time format of duplicity)
 # 'keep = yes' means : do not delete old data, the remote host will take care of this
@@ -419,13 +430,19 @@ keep = $dup_keep
 #desturl = file:///usr/local/backup
 #desturl = rsync://user@other.host//var/backup/bla
 #desturl = s3+http://your_bucket
+#desturl = ftp://myftpuser@ftp.example.org/remote/ftp/path
 
 # Amazon Web Services Access Key ID and Secret Access Key, needed for backups
 # to S3 buckets.
 #awsaccesskeyid = YOUR_AWS_ACCESS_KEY_ID
 #awssecretaccesskey = YOUR_AWS_SECRET_KEY
 
+# FTP password, needed for backups using desturl = ftp://...
+#ftp_password = 
+
 # bandwith limit, in kbit/s ; default is 0, i.e. no limit
+# if using 'desturl' above, 'bandwidthlimit' must not be set
+# an example setting of 128 kbit/s would be:
 #bandwidthlimit = 128
 bandwidthlimit = $dup_bandwidth
 
@@ -435,15 +452,20 @@ bandwidthlimit = $dup_bandwidth
 #sshoptions = -o IdentityFile=/root/.ssh/id_dsa_duplicity
 sshoptions = $dup_sshoptions
 
-# put the backups under this directory
+# put the backups under this destination directory
+# if using 'desturl' above, this must not be set
+# in all other cases, this must be set!
 destdir = $dup_destdir
 
 # the machine which will receive the backups
+# if using 'desturl' above, this must not be set
+# in all other cases, this must be set!
 desthost = $dup_desthost
 
 # make the files owned by this user
-# note: you must be able to ssh backupuser@backhost
-# without specifying a password (if type = remote).
+# if using 'desturl' above, this must not be set
+# note: if using an SSH-based transport and 'type' is set to 'remote', you must
+#       be able to 'ssh backupuser@backuphost' without specifying a password.
 destuser = $dup_destuser
 
 EOF
@@ -510,6 +532,7 @@ dup_wizard() {
    dup_excludes=
    dup_vsincludes=
    dup_incremental=yes
+   dup_increments=30
    dup_keep=60
    dup_bandwidth=
    dup_sshoptions=
@@ -529,7 +552,7 @@ dup_wizard() {
    # Global variables whose '*' shall not be expanded
    set -o noglob
    dup_default_includes="/var/spool/cron/crontabs /var/backups /etc /root /home /usr/local/*bin /var/lib/dpkg/status*"
-   dup_default_excludes="/home/*/.gnupg /home/*/.local/share/Trash /home/*/.Trash /home/*/.thumbnails /home/*/.beagle /home/*/.aMule /home/*/gtk-gnutella-downloads"
+   dup_default_excludes="/home/*/.gnupg /home/*/.local/share/Trash /home/*/.Trash /home/*/.thumbnails /home/*/.beagle /home/*/.aMule /home/*/gtk-gnutella-downloads /var/cache/backupninja/duplicity"
    set +o noglob
 
    dup_main_menu
diff --git a/handlers/dup.in b/handlers/dup.in
index ffae48c1bece695bcd2462f4bcc33e4929d34783..52166433f05507c65585397992f09a46b2402bf6 100644
--- a/handlers/dup.in
+++ b/handlers/dup.in
@@ -24,10 +24,12 @@ getconf exclude
 
 setsection dest
 getconf incremental yes
+getconf increments 30
 getconf keep 60
 getconf desturl
 getconf awsaccesskeyid
 getconf awssecretaccesskey
+getconf ftp_password
 getconf sshoptions
 getconf bandwidthlimit 0
 getconf desthost
@@ -43,6 +45,9 @@ destdir=${destdir%/}
 if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "s3+http" ]; then
    [ -n "$awsaccesskeyid" -a -n "$awssecretaccesskey" ]  || fatal "AWS access keys must be set for S3 backups."
 fi
+if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "ftp" ]; then
+   [ -n "$ftp_password" ]  || fatal "ftp_password must be set for FTP backups."
+fi
 
 ### VServers
 # If vservers are configured, check that the ones listed in $vsnames do exist.
@@ -95,8 +100,8 @@ else
    execstr_serverpart="scp://$destuser@$desthost/$destdir"
 fi
 
-### duplicity version
-duplicity_version="`duplicity --version | @AWK@ '{print $2}'`"
+### duplicity version (ignore anything other than digits and ".")
+duplicity_version="`duplicity --version | @AWK@ '{print $2}' | @SED@ 's/[^.[:digit:]]//g'`"
 duplicity_major="`echo $duplicity_version | @AWK@ -F '.' '{print $1}'`"
 duplicity_minor="`echo $duplicity_version | @AWK@ -F '.' '{print $2}'`"
 duplicity_sub="`echo $duplicity_version | @AWK@ -F '.' '{print $3}'`"
@@ -163,6 +168,20 @@ if [ "$incremental" == "no" ]; then
    else
       execstr_command="full"
    fi
+else
+   # we're in incremental mode
+   if [ "$increments" != "keep" ]; then
+      # if we don't want to keep incremental backups forever
+      if [ "`echo $increments | tr -d 0-9`" == "" ]; then
+         increments="${increments}D"
+      fi
+      execstr_options="${execstr_options} --full-if-older-than $increments"
+   fi
+fi
+
+### Cleanup options
+if [ "$duplicity_major" -gt 0 -o "$duplicity_minor" -gt 6 -o \( "$duplicity_minor" -eq 6 -a "$duplicity_sub" -ge 1 \) ]; then
+   execstr_options="${execstr_options} --extra-clean"
 fi
 
 ### Temporary directory
@@ -178,6 +197,15 @@ if [ -n "$tmpdir" ]; then
    precmd="${precmd}TMPDIR=$tmpdir "
 fi
 
+### Archive directory
+# duplicity >= 0.6.01 enables the archive directory by default; put it into
+# /var/cache/backupninja/duplicity unless the user has specified one.
+if [ "$duplicity_major" -gt 0 -o "$duplicity_minor" -gt 6 -o \( "$duplicity_minor" -eq 6 -a "$duplicity_sub" -ge 1 \) ]; then
+   if ! echo "${options}" | grep -q -- "--archive-dir" ; then
+      execstr_options="${execstr_options} --archive-dir /var/cache/backupninja/duplicity"
+   fi
+fi
+
 ### Cleanup old backup sets (or not)
 if [ "$keep" != "yes" ]; then
    if [ "`echo $keep | tr -d 0-9`" == "" ]; then
@@ -245,6 +273,7 @@ if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 4 -a "$duplicity_sub" -g
    debug "$precmd duplicity cleanup --force $execstr_options $execstr_serverpart"
    if [ ! $test ]; then
       export PASSPHRASE=$password
+      export FTP_PASSWORD=$ftp_password
       output=`nice -n $nicelevel \
          su -c \
          "$precmd duplicity cleanup --force $execstr_options $execstr_serverpart 2>&1"`
@@ -265,6 +294,7 @@ if [ "$keep" != "yes" ]; then
       debug "$precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart"
       if [ ! $test ]; then
          export PASSPHRASE=$password
+         export FTP_PASSWORD=$ftp_password
          output=`nice -n $nicelevel \
                    su -c \
                       "$precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart 2>&1"`
@@ -285,6 +315,7 @@ debug "$precmd duplicity $execstr_command $execstr_options $execstr_source --exc
 if [ ! $test ]; then
    outputfile=`maketemp backupout`
    export PASSPHRASE=$password
+   export FTP_PASSWORD=$ftp_password
    output=`nice -n $nicelevel \
              su -c \
                 "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart >$outputfile 2>&1"`
diff --git a/handlers/ldap.in b/handlers/ldap.in
index fda24d08878304f044821d08c5608cfde171402f..83307eedd3992e1f4d056dd9dfe0a90e66d732cc 100644
--- a/handlers/ldap.in
+++ b/handlers/ldap.in
@@ -86,7 +86,7 @@ if [ "$ldif" == "yes" ]; then
          fi
 
          if [ "$compress" == "yes" ]; then
-            execstr="$execstr | $GZIP --rsyncable > $dumpdir/$dbsuffix.ldif.gz"
+            execstr="$execstr | $GZIP $GZIP_OPTS > $dumpdir/$dbsuffix.ldif.gz"
          else
             execstr="$execstr > $dumpdir/$dbsuffix.ldif"
          fi
diff --git a/handlers/mysql.in b/handlers/mysql.in
index 3488c511becd6ecd8f247d4f2f476f9303d2fcbd..0282046f6387c6c458f07fe7a786e28186017d7f 100644
--- a/handlers/mysql.in
+++ b/handlers/mysql.in
@@ -86,7 +86,7 @@ fi
 
 defaultsfile=""
 
-if [ "$dbusername" != "" -a "$dbpassword" != "" ]
+if [ -n "$dbusername" -a -n "$dbpassword" ]
 then
    if [ $usevserver = yes ]
    then
@@ -140,13 +140,10 @@ password="$dbpassword"
 EOF
    umask $oldmask
    defaultsfile="--defaults-extra-file=$mycnf"
-fi
 
-# if a user is not set, use $configfile, otherwise use $mycnf
-if [ "$user" == "" ]; then
-   user=root;
-   defaultsfile="--defaults-extra-file=$configfile"
-else
+# else, if a user is set, use their ~/.my.cnf
+elif [ -n "$user" ]
+then
    userset=true;
    if [ $usevserver = yes ]
    then
@@ -169,6 +166,10 @@ else
 	
    defaultsfile="--defaults-extra-file=$userhome/.my.cnf"
    debug "using $defaultsfile"
+# otherwise use $configfile
+else
+   user=root
+   defaultsfile="--defaults-extra-file=$configfile"
 fi
 
 #######################################################################
@@ -255,6 +256,10 @@ then
    for db in $databases
    do
       DUMP_BASE="$MYSQLDUMP $defaultsfile $sqldumpoptions"
+      if [ "$db" = "information_schema" ]
+      then
+         DUMP_BASE="${DUMP_BASE} --skip-lock-tables"
+      fi
 
       # Dumping structure and data
       DUMP="$DUMP_BASE $ignore $db"
@@ -279,7 +284,7 @@ then
             fatal "mysqld doesn't appear to be running!"
          fi
          if [ "$compress" == "yes" ]; then
-            execstr="$VSERVER $vsname exec $DUMP | $GZIP --rsyncable > '$vroot$dumpdir/${db}.sql.gz'"
+            execstr="$VSERVER $vsname exec $DUMP | $GZIP $GZIP_OPTS > '$vroot$dumpdir/${db}.sql.gz'"
          else
             execstr="$VSERVER $vsname exec $DUMP -r '$vroot$dumpdir/${db}.sql'"
          fi
@@ -290,7 +295,7 @@ then
             fatal "mysqld doesn't appear to be running!"
          fi
          if [ "$compress" == "yes" ]; then
-            execstr="$DUMP | $GZIP --rsyncable > '$dumpdir/${db}.sql.gz'"
+            execstr="$DUMP | $GZIP $GZIP_OPTS > '$dumpdir/${db}.sql.gz'"
          else
             execstr="$DUMP -r '$dumpdir/${db}.sql'"
          fi
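The information_schema special case above is needed because that
database is a read-only, virtual schema that mysqldump cannot lock. A
sketch of the resulting command, assuming the job's sqldumpoptions
contain --lock-tables (the last occurrence wins on a mysqldump command
line, so the appended flag overrides it for this one database):

    $MYSQLDUMP $defaultsfile --lock-tables ... --skip-lock-tables information_schema
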
diff --git a/handlers/pgsql.in b/handlers/pgsql.in
index 77a73fee98ed7f4f7d90ceff53bb56743b0501f0..0b7badfdecb51c1c122c71f04a1ecf807a304f89 100644
--- a/handlers/pgsql.in
+++ b/handlers/pgsql.in
@@ -75,13 +75,13 @@ chmod 700 $vroot$backupdir
 if [ "$databases" == "all" ]; then
    if [ $usevserver = yes ]; then
       if [ "$compress" == "yes" ]; then
-         execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP --rsyncable > '$backupdir/${vsname}.sql.gz'\""
+         execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${vsname}.sql.gz'\""
       else
          execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${vsname}.sql'\""
       fi
    else
       if [ "$compress" == "yes" ]; then
-         execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP --rsyncable > '$backupdir/${localhost}-all.sql.gz'\""
+         execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${localhost}-all.sql.gz'\""
       else
          execstr="su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${localhost}-all.sql'\""
       fi
@@ -104,13 +104,13 @@ else
    for db in $databases; do
       if [ $usevserver = yes ]; then
          if [ "$compress" == "yes" ]; then
-            execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP --rsyncable > '$backupdir/${db}.sql.gz'\""
+            execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.sql.gz'\""
          else
             execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMP $db | > '$backupdir/${db}.sql'\""
          fi
       else
          if [ "$compress" == "yes" ]; then
-            execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP --rsyncable > '$backupdir/${db}.sql.gz'\""
+            execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.sql.gz'\""
          else
             execstr="su - $PGSQLUSER -c \"$PGSQLDUMP $db > '$backupdir/${db}.sql'\""
          fi
diff --git a/handlers/rdiff.helper.in b/handlers/rdiff.helper.in
index e35d6297f5babc58e6633826b6e7e5477e35d5ab..b5bb8bba35deca7cd06879a16433b9f7e778e628 100644
--- a/handlers/rdiff.helper.in
+++ b/handlers/rdiff.helper.in
@@ -415,7 +415,7 @@ rdiff_wizard() {
    # Global variables whose '*' shall not be expanded
    set -o noglob
    rdiff_includes=(/var/spool/cron/crontabs /var/backups /etc /root /home /usr/local/*bin /var/lib/dpkg/status*)
-   rdiff_excludes=(/home/*/.gnupg /home/*/.local/share/Trash /home/*/.Trash /home/*/.thumbnails /home/*/.beagle /home/*/.aMule /home/*/gtk-gnutella-downloads)
+   rdiff_excludes=(/home/*/.gnupg /home/*/.local/share/Trash /home/*/.Trash /home/*/.thumbnails /home/*/.beagle /home/*/.aMule /home/*/gtk-gnutella-downloads /var/cache/backupninja/duplicity)
    rdiff_vsincludes=
    set +o noglob
 
diff --git a/handlers/sys.in b/handlers/sys.in
index 69751ed125978d7c2df25ea4ee0e14634252a301..a92663e23766b43c9ed26ecb4e62d21d46effc70 100755
--- a/handlers/sys.in
+++ b/handlers/sys.in
@@ -462,7 +462,9 @@ STATUS="Collecting Software RAID information (/etc/mdadm.conf)"
 catiffile "/etc/mdadm.conf"
 
 STATUS="Collecting Software RAID information (/sbin/mdadm -Q)"
-catifexec "/sbin/mdadm" "-Q" "--detail" '/dev/md?*'
+if ls /dev/md?* >/dev/null 2>&1; then
+   catifexec "/sbin/mdadm" "-Q" "--detail" '/dev/md?*'
+fi
 
 STATUS="Collecting Automount information (auto.master)"
 catiffile "/etc/auto.master"
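The guard above works because the /dev/md?* glob only expands when RAID
device nodes actually exist; otherwise ls receives the literal pattern,
fails, and mdadm is skipped. The same test can be tried by hand on a
machine without RAID devices:

    ls /dev/md?* >/dev/null 2>&1 || echo "no RAID devices here"
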
diff --git a/man/backupninja.1 b/man/backupninja.1
index 53b1e3cf55635960ebf9e53d183f787326ff6e02..5622881928735ad55d93fd1574fdd75164f720b1 100644
--- a/man/backupninja.1
+++ b/man/backupninja.1
@@ -96,7 +96,7 @@ To preform the actual backup actions, backupninja processes each action configur
 
 .SH EXAMPLE USAGE
 .TP
-Backupninja can be used to impliment whatever backup strategy you choose. It is intended, however, to be used like so:
+Backupninja can be used to implement whatever backup strategy you choose. It is intended, however, to be used like so:
 .TP
 First, databases are safely copied or exported to /var/backups.  Often, you cannot make a file backup of a database while it is in use, hence the need to use special tools to make a safe copy or export into /var/backups.
 .TP
diff --git a/src/backupninja.in b/src/backupninja.in
index e8a820ff562aa43b21f2bce94526b7d726a0afcf..b397a142069ab5ca9f641f65800c9427a86d871a 100755
--- a/src/backupninja.in
+++ b/src/backupninja.in
@@ -480,6 +480,7 @@ getconf PGSQLDUMP /usr/bin/pg_dump
 getconf PGSQLDUMPALL /usr/bin/pg_dumpall
 getconf PGSQLUSER postgres
 getconf GZIP /bin/gzip
+getconf GZIP_OPTS --rsyncable
 getconf RSYNC /usr/bin/rsync
 getconf admingroup root