diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1640a7114a8cd23d0253e02b8024c039e34f81bf..1fd4c881ea7e563f02822d359217d00e09ad1d49 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.2.1] - 2021-01-25
+
+### Added
+
+- [core] implement `reportwrap` configuration parameter
+
+### Changed
+
+- [core] raise error if mail isn't found in `$PATH` and `reportemail = yes`
+- [core] unset `BACKUPNINJA_DEBUG` before exit
+
+### Fixed
+
+- [build] make build reproducible regardless of usrmerge (DEBBUG-915222)
+- [core] silence exit code message unless `--debug` is used
+- [core] backup halt should trigger email report if enabled
+- [core] wrap report email body to 1000 characters by default (DEBBUG-871793)
+- [core] improve error handling around `reporthost` feature
+- [docs] add missing parameters to `backupninja.conf` manpage
+- [sys] improve device selection for MBR backup (#11303)
+
 ## [1.2.0] - 2021-01-21
 
 ### Fixed
diff --git a/backupninja.spec b/backupninja.spec
index 5b1b24a1c55a71e6fabb581af927fc59a1576fbc..b0490830aece6a701a242d434526321b56698f29 100644
--- a/backupninja.spec
+++ b/backupninja.spec
@@ -1,5 +1,5 @@
 %define name backupninja
-%define version 1.2.0
+%define version 1.2.1
 
 Summary: Backupninja backup tool
 Name: %{name}
diff --git a/configure b/configure
index f57fa524289b5c20e65d265e000efb3ad4c0e0a4..faf96904914e7beefbb0bcfe9f1fee700eb2a215 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for backupninja 1.2.0.
+# Generated by GNU Autoconf 2.69 for backupninja 1.2.1.
 #
 # Report bugs to <backupninja@lists.riseup.net>.
 #
@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='backupninja'
 PACKAGE_TARNAME='backupninja'
-PACKAGE_VERSION='1.2.0'
-PACKAGE_STRING='backupninja 1.2.0'
+PACKAGE_VERSION='1.2.1'
+PACKAGE_STRING='backupninja 1.2.1'
 PACKAGE_BUGREPORT='backupninja@lists.riseup.net'
 PACKAGE_URL=''
 
@@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures backupninja 1.2.0 to adapt to many kinds of systems.
+\`configure' configures backupninja 1.2.1 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1286,7 +1286,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of backupninja 1.2.0:";;
+     short | recursive ) echo "Configuration of backupninja 1.2.1:";;
    esac
   cat <<\_ACEOF
 
@@ -1360,7 +1360,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-backupninja configure 1.2.0
+backupninja configure 1.2.1
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1377,7 +1377,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by backupninja $as_me 1.2.0, which was
+It was created by backupninja $as_me 1.2.1, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
 
@@ -2241,7 +2241,7 @@ fi
 
 # Define the identity of the package.
 PACKAGE='backupninja'
- VERSION='1.2.0'
+ VERSION='1.2.1'
 
 cat >>confdefs.h <<_ACEOF
@@ -2337,9 +2337,6 @@ fi
 
 # Checks for programs.
 
-# BASH may already be set in the shell, if the admin then changes the
-# the /bin/sh symlink to a non-bash shell, all hell will break lose.
-unset BASH
 for ac_prog in bash
 do
   # Extract the first word of "$ac_prog", so it can be a program name with args.
@@ -3276,7 +3273,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by backupninja $as_me 1.2.0, which was
+This file was extended by backupninja $as_me 1.2.1, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -3329,7 +3326,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-backupninja config.status 1.2.0
+backupninja config.status 1.2.1
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
diff --git a/configure.ac b/configure.ac
index 891bb0f47d853933604abc1e3cb259937880ec02..0bb4d8091c1b18977fc97f45b53901089aa543c0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,15 +3,12 @@
 # The maintainer mode is causing me grief with newest versions of autotools
 #AM_MAINTAINER_MODE
 
-AC_INIT([backupninja],[1.2.0],[backupninja@lists.riseup.net])
+AC_INIT([backupninja],[1.2.1],[backupninja@lists.riseup.net])
 AC_CONFIG_SRCDIR([src/backupninja.in])
 AM_INIT_AUTOMAKE([foreign])
 
 # Checks for programs.
 
-# BASH may already be set in the shell, if the admin then changes the
-# the /bin/sh symlink to a non-bash shell, all hell will break lose.
-unset BASH
 AC_PATH_PROGS(BASH, bash, "no", [$PATH:/bin:/usr/bin:/usr/sbin])
 if test x$BASH = "xno"; then
    AC_MSG_ERROR([bash is required])
diff --git a/etc/backupninja.conf.in b/etc/backupninja.conf.in
index a350fc849450e01a00ba9e3735ce1295bc6d6cef..f2366a32b782c8bf8e00d3dcd4b54209729cd29f 100644
--- a/etc/backupninja.conf.in
+++ b/etc/backupninja.conf.in
@@ -46,6 +46,9 @@ reportuser = ninja
 # use a globally unique name, preferably the hostname
 reportdirectory = /var/lib/backupninja/reports
 
+# number of columns the report email body should wrap to
+#reportwrap = 80
+
 # set to the administration group that is allowed to
 # read/write configuration files in /etc/backup.d
 admingroup = root
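The new `reportwrap` parameter added above is consumed further down in this diff, in src/backupninja.in, where the assembled report body is piped through fold(1) before reaching mail(1). A minimal sketch of the effect, with an illustrative message and the built-in default of 1000 columns overridden to 80:

    reportwrap=80     # backupninja's packaged default is 1000
    printf '%s\n' "Info: one very long handler message that would otherwise arrive as a single unbroken line in the report email..." \
       | fold -s -w "$reportwrap" \
       | mail -s "backupninja: $(hostname) SUCCESS" root

fold -s breaks at spaces rather than mid-word; mail(1) must be present in $PATH, which this same patch now checks explicitly before sending.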
diff --git a/examples/example.borg b/examples/example.borg
index b95ad73b7e64166d81209a4b8f91265602c4886a..956d36ed1046014be5ea99673d8a4d3da438cc72 100644
--- a/examples/example.borg
+++ b/examples/example.borg
@@ -33,8 +33,8 @@
 ## Default:
 # testconnect = yes
 
-## default is not to limit bandwidth. 
-## set to a number in kiBytes/second to limit bandwidth usage. 
+## default is not to limit bandwidth.
+## set to a number in kiBytes/second to limit bandwidth usage.
 ##
 ## Default:
 # bwlimit = 0
@@ -164,7 +164,7 @@ exclude = /var/lib/mysql
 ## for more info see : borg help create
 ##
 ## Default:
-# archive = {now:%Y-%m-%dT%H:%M:%S} 
+# archive = {now:%Y-%m-%dT%H:%M:%S}
 
 ## compression algorithm
 ## can be "none", "lz4", "zstd[,L]", "zlib[,L]", "lzma[,L]", "auto,C[,L]".
@@ -213,4 +213,4 @@ exclude = /var/lib/mysql
 ## sshoptions = -i /root/.ssh/id_rsa_borg
 ##
 ## Default:
-# sshoptions = 
+# sshoptions =
diff --git a/examples/example.dup b/examples/example.dup
index 6977636a636bd36b1f0684fa6183bcb71761f627..87dedf365c524d17cba82f3133624441c76732e2 100644
--- a/examples/example.dup
+++ b/examples/example.dup
@@ -1,4 +1,4 @@
-## This is an example duplicity configuration file. 
+## This is an example duplicity configuration file.
 ##
 ## Here you can find all the possible duplicity options, details of
 ## what the options provide and possible settings. The defaults are set
@@ -12,7 +12,7 @@
 ## options = --s3-european-buckets --s3-use-new-style
 ##
 ## Default:
-# options = 
+# options =
 
 ## default is 0, but set to something like 19 if you want to lower the priority.
 ##
@@ -37,7 +37,7 @@
 
 ## temporary directory used by duplicity, set to some other location if your /tmp is small
 ## default is either /tmp or /usr/tmp, depending on the system
-## 
+##
 ## Default:
 # tmpdir = /tmp
 
@@ -78,14 +78,14 @@
 ## encryptkey = 04D9EA79
 ##
 ## Default:
-# encryptkey = 
+# encryptkey =
 
 ## ID of the GnuPG private key used for data signing.
 ## if not set, encryptkey will be used, an example setting would be:
 ## signkey = 04D9EA79
-## 
+##
 ## Default:
-# signkey = 
+# signkey =
 
 ## password used to unlock the encryption key
 ## NB: neither quote this, nor should it contain any quotes,
@@ -93,7 +93,7 @@
 ## password = a_very_complicated_passphrase
 ##
 ## Default:
-# password = 
+# password =
 
 ## password used to unlock the signature key, used only if
 ## it differs from the encryption key
@@ -150,12 +150,12 @@ exclude = /var/cache/backupninja/duplicity
 ## perform an incremental backup? (default = yes)
 ## if incremental = no, perform a full backup in order to start a new backup set
 ##
-## Default: 
+## Default:
 # incremental = yes
 
 ## how many days of incremental backups before doing a full backup again ;
 ## default is 30 days (one can also use the time format of duplicity).
-## if increments = keep, never automatically perform a new full backup ; 
+## if increments = keep, never automatically perform a new full backup ;
 ## only perform incremental backups.
 ##
 ## Default:
@@ -188,7 +188,7 @@ exclude = /var/cache/backupninja/duplicity
 ## the default value of this configuration option is not set:
 ##
 ## Default:
-# desturl = 
+# desturl =
 
 ## Amazon Web Services Access Key ID and Secret Access Key, needed for backups
 ## to S3 buckets.
@@ -196,8 +196,8 @@ exclude = /var/cache/backupninja/duplicity
 ## awsaccesskeyid = YOUR_AWS_ACCESS_KEY_ID
 ## awssecretaccesskey = YOUR_AWS_SECRET_KEY
 ##
 ## Default:
-# awsaccesskeyid = 
-# awssecretaccesskey = 
+# awsaccesskeyid =
+# awssecretaccesskey =
 
 ## RackSpace's CloudFiles username, API key, and authentication URL.
 ## cfusername = YOUR_CF_USERNAME
@@ -205,9 +205,9 @@ exclude = /var/cache/backupninja/duplicity
 ## cfapikey = YOUR_CF_API_KEY
 ## cfauthurl = YOUR_CF_AUTH_URL
 ##
 ## Default:
-# cfusername = 
-# cfapikey = 
-# cfauthurl = 
+# cfusername =
+# cfapikey =
+# cfauthurl =
 
 ## Dropbox requires a valid authentication token. To obtain one, you will need
 ## to create a Dropbox API application at https://www.dropbox.com/developers/apps/create.
@@ -222,7 +222,7 @@ exclude = /var/cache/backupninja/duplicity
 ## FTP password, needed for backups using desturl = ftp://...
 ##
 ## Default:
-# ftp_password = 
+# ftp_password =
 
 ## bandwith limit, in KB/s ; default is 0, i.e. no limit
 ## if using 'desturl' above, 'bandwidthlimit' must not be set
@@ -247,25 +247,25 @@ exclude = /var/cache/backupninja/duplicity
 ## warning: requires no space beetween "-o" and "IdentityFile=...".
 ##
 ## Default:
-# sshoptions = 
+# sshoptions =
 
 ## put the backups under this destination directory
 ## if using 'desturl' above, this must not be set
 ## in all other cases, this must be set!
 ## an example setting would be:
 ## destdir = /backups
-## 
+##
 ## Default:
-# destdir = 
+# destdir =
 
 ## the machine which will receive the backups
 ## if using 'desturl' above, this must not be set
 ## in all other cases, this must be set!
 ## an example setting would be:
 ## desthost = backuphost
-## 
-## Default: 
-# desthost = 
+##
+## Default:
+# desthost =
 
 ## make the files owned by this user
 ## if using 'desturl' above, this must not be set
diff --git a/examples/example.maildir b/examples/example.maildir
index eba54296c543f94a1dd7f21f907dc1d73e00d22f..0e8f3702b32b45ab78569700c65379cfb573ad45 100644
--- a/examples/example.maildir
+++ b/examples/example.maildir
@@ -13,10 +13,10 @@
 ## We handle each maildir individually because it becomes very
 ## unweldy to hardlink and rsync many hundreds of thousands
 ## of files at once. It is much faster to take on smaller
-## chunks at a time. 
+## chunks at a time.
 ##
 ## Any maildir which is deleted from the source will be moved to
-## "deleted" directory in the destination. It is up to you to 
+## "deleted" directory in the destination. It is up to you to
 ## periodically remove this directory or old maildirs in it.
 ##
 ## Note: This handler assumes that the remote shell is set to bash
@@ -27,7 +27,7 @@
 when = everyday at 21:00
 
 ## each users maildir will contain these files:
-## daily.1, daily.2, daily.3, daily.4, daily.5, weekly.1, weekly.2, 
+## daily.1, daily.2, daily.3, daily.4, daily.5, weekly.1, weekly.2,
 ## weekly.3, monthly.1
 ## if keepdaily is 5, keepweekly is 3, and keepmonthly is 1
 keepdaily = 5
@@ -53,7 +53,7 @@ destuser = backer
 # For alternate ports from the default 22, specify here
 destport = 4444
 
-# If you need to specify an alternate ssh public key authentication file 
+# If you need to specify an alternate ssh public key authentication file
 # do that here. Default: /root/.ssh/id_rsa
 destid_file = /home/backupkeys/.ssh/maildirbackup_id_rsa
 
diff --git a/examples/example.mysql b/examples/example.mysql
index a0e386b10d55074e5386cf4496e8b2da1504ce0c..1915b0047e22ee6b4a8b4f1a110158bc239befb6 100644
--- a/examples/example.mysql
+++ b/examples/example.mysql
@@ -30,11 +30,11 @@ compress = yes
 # configfile = < path/to/file > (default = /etc/mysql/debian.cnf)
 # The config file is passed to mysql with --defaults-file.
 # On debian, this default will allow backupninja to make backups
-# of mysql without configuring any additional options. 
+# of mysql without configuring any additional options.
 # (this option is not compatible with "user" or "dbusername").
 #
 # user = <user> (default = root)
-# Run mysql commands as 'user'. A valid .my.cnf must exist with a 
+# Run mysql commands as 'user'. A valid .my.cnf must exist with a
 # database username and password in the user's home directory.
 # (this option is not compatible with "configfile" or "dbusername").
 #
@@ -43,14 +43,14 @@ compress = yes
 # (this option is not compatible with "configfile" or "user").
 #
 # dbpassword = <dbpass> (no default)
-# The password used with dbusername. this password will NOT be passed 
+# The password used with dbusername. this password will NOT be passed
 # on the command line and is not readable using "ps aux".
 #
 # dbhost = <host> (default = localhost)
 # only localhost works right now.
 #
 # databases = < all | db1 db2 db3 > (default = all)
-# which databases to backup. should either be the word 'all' or a 
+# which databases to backup. should either be the word 'all' or a
 # space separated list of database names.
 #
 # nodata = < db.table1 db.table2 db.table3 > (no default)
@@ -60,10 +60,10 @@ compress = yes
 # isn't necessary to backup, but you still need the structure to exist
 # on a restore. You *must* specify the table as part of a database, such
 # as "drupal.cache", where the database name is "drupal" and the table that
-# you do not want to dump the data for is called "cache". 
+# you do not want to dump the data for is called "cache".
 #
 # backupdir = < path/to/destination > (default = /var/backups/mysql)
-# where to dump the backups. hotcopy backups will be in a subdirectory 
+# where to dump the backups. hotcopy backups will be in a subdirectory
 # 'hotcopy' and sqldump backups will be in a subdirectory 'sqldump'
 #
 # hotcopy = < yes | no > (default = no)
@@ -78,4 +78,4 @@ compress = yes
 # arguments to pass to mysqldump
 #
 # compress = < yes | no > (default = yes)
-# if yes, compress the sqldump output. 
+# if yes, compress the sqldump output.
diff --git a/examples/example.pgsql b/examples/example.pgsql
index d36ec3807893f5ddb5b189303cffabf74a05d9f9..73b77f5ad230b47e21fff690a14ccae5d1d15334 100644
--- a/examples/example.pgsql
+++ b/examples/example.pgsql
@@ -4,19 +4,19 @@
 # where to dump the backups
 
 # databases = < all | db1 db2 db3 > (default = all)
-# which databases to backup. should either be the word 'all' or a 
+# which databases to backup. should either be the word 'all' or a
 # space separated list of database names.
 # Note: when using 'all', pg_dumpall is used instead of pg_dump, which means
 # that cluster-wide data (such as users and groups) are saved.
 
 # compress = < yes | no > (default = yes)
-# if yes, compress the pg_dump/pg_dumpall output. 
+# if yes, compress the pg_dump/pg_dumpall output.
 
 # format = < plain | tar | custom > (default = plain)
 #   plain - Output a plain-text SQL script file with the extension .sql.
 #           When dumping all databases, a single file is created via pg_dumpall.
-#   tar - Output a tar archive suitable for input into pg_restore. More 
-#         flexible than plain and can be manipulated by standard Unix tools 
+#   tar - Output a tar archive suitable for input into pg_restore. More
+#         flexible than plain and can be manipulated by standard Unix tools
 #         such as tar. Creates a globals.sql file and an archive per database.
 #   custom - Output a custom PostgreSQL pg_restore archive. This is the most
 #            flexible format allowing selective import and reordering of database
diff --git a/examples/example.rdiff b/examples/example.rdiff
index 18940e6a4818178b6abc9032d24e1d0dd2ea4fab..f5d29fa2f67c7fd11825d8afd2d76cd3998444bb 100644
--- a/examples/example.rdiff
+++ b/examples/example.rdiff
@@ -6,17 +6,17 @@
 ## as the commented out option, uncomment and change when
 ## necessary. Options which are uncommented in this example do not have
 ## defaults, and the settings provided are recommended.
-## 
-## The defaults are useful in most cases, just make sure to configure the 
+##
+## The defaults are useful in most cases, just make sure to configure the
 ## destination host and user.
 ##
 
 ## passed directly to rdiff-backup
 ## an example setting would be:
 ## options = --force
-## 
+##
 ## Default:
-# options = 
+# options =
 
 ## default is 0, but set to 19 if you want to lower the priority.
 ## an example setting would be:
@@ -40,10 +40,10 @@
 ## Default:
 # testconnect = no
 
-## default is not to limit bandwidth. 
-## set to a number in bytes/second to limit bandwidth usage. Use a negative 
-## number to set a limit that will never be exceeded, or a positive number 
-## to set a target average bandwidth use. cstream is required. See cstream's 
+## default is not to limit bandwidth.
+## set to a number in bytes/second to limit bandwidth usage. Use a negative
+## number to set a limit that will never be exceeded, or a positive number
+## to set a target average bandwidth use. cstream is required. See cstream's
 ## -t option for more information. 62500 bytes = 500 Kb (.5 Mb)
 ## an example setting would be:
 ## bwlimit = 62500
@@ -57,9 +57,9 @@
 ## on remote and local side are different, and you are certain there are no
 ## problems in using mis-matched versions and want to get beyond this check.
 ## An example usage could be the remote side has its authorized_keys configured
-## with command="rdiff-backup --server" to allow for restricted yet automated 
+## with command="rdiff-backup --server" to allow for restricted yet automated
 ## password-less backups
-## 
+##
 ## Default:
 # ignore_version = no
 
@@ -149,7 +149,7 @@ exclude = /var/cache/backupninja/duplicity
 ## put the backups under this directory, this must be set!
 ## an example setting would be:
 ## directory = /backups
-## 
+##
 ## Default:
 # directory = 
 
@@ -175,4 +175,4 @@ exclude = /var/cache/backupninja/duplicity
 ## sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
 ##
 ## Default:
-# sshoptions = 
+# sshoptions =
diff --git a/examples/example.rsync b/examples/example.rsync
index 9e1e1e90120be120dbc43b55301a55819eca9d43..3036bb15578fe607b7ade9ba4b89eaa789a57478 100644
--- a/examples/example.rsync
+++ b/examples/example.rsync
@@ -14,13 +14,13 @@
 # just use this option if your data is backed up in a separate partition and
 # you want backupninja to fsck it; this option will just be used if fscheck
 # (see below) is set to 'yes'
-#partition = 
+#partition =
 
 # set to 1 if fsck should run on partition after the backup is made
-#fscheck = 
+#fscheck =
 
 # set to 1 if partition is mounted read-only
-#read_only = 
+#read_only =
 
 # backup partition mountpoint or backup main folder
 # this doesn't need to be a real partition, but should be at least the
@@ -40,15 +40,15 @@ backupdir = myserver
 # if you want to have incremental backups for longer periods (like months) you
 # have to configure rotations for 30 or more using the "days" parameter at the
 # [general] section in the handler config.
-# 
+#
 # The short format is better described here:
 # http://www.mikerubel.org/computers/rsync_snapshots/#Incremental
-# 
+#
 # The long format is inspired by the maildir handler and allows keeping backups
 # of longer periods (weeks and months) using less rotations as it stores
 # the increments in folders like daily.1, weekly.1, monthly.1 and has three
 # rotation parameters:
-# 
+#
 # keepdaily = number of daily backup increments
 # keepweekly = number of weekly backup increments
 # keepmonthly = number of monthly backup increments
diff --git a/examples/example.svn b/examples/example.svn
index 0a64f0908b1aa74978525de817c3e3568da5572f..a9f17caea51dcfd319112cc38bb113b08ff18638 100644
--- a/examples/example.svn
+++ b/examples/example.svn
@@ -1,6 +1,6 @@
 ##
 ## Perform a hot backup of subversion repositories.
-## 
+##
 ## REQUIRES: apt-get install subversion-tools
 ##
 ## This file can be empty, the defaults are usually good.
diff --git a/examples/example.sys b/examples/example.sys
index 85bc650e282b29403a5917f144481d39b0574f1f..910060b642b2ccf226348b5f5abdb75d8f17c623 100644
--- a/examples/example.sys
+++ b/examples/example.sys
@@ -12,17 +12,17 @@
 # (2) a list of all the packages installed and removed.
 #     this file can be used to restore the state of installed packages
 #     by running "dpkg --set-selections < dpkg-selections.txt and
-#     then run "apt-get -u dselect-upgrade". If you have the 
+#     then run "apt-get -u dselect-upgrade". If you have the
 #     debconf-set-selections file from (1), you should restore those first.
-# 
-# (3) the partition table of all disks. 
+#
+# (3) the partition table of all disks.
 #     this partition table can be used to format another disk of
-#     the same size. this can be handy if using software raid and 
+#     the same size. this can be handy if using software raid and
 #     you have a disk go bad. just replace the disk and partition it
 #     by running "sfdisk /dev/sdb < partitions.sdb.txt"
 #     (MAKE SURE YOU PARTITION THE CORRECT DISK!!!)
 #
-# (4) hardware information. 
+# (4) hardware information.
 #     detailed information on most important aspects of the hardware.
 #
 # (5) the Luks header of every Luks block device, if option luksheaders
@@ -53,9 +53,9 @@
 # partitions = yes
 
 # NOTE: the __star__ below will be replaced by the disks found on the
-# system (e.g. partitions.sda.txt, partitions.sdb.txt). If you change 
-# the partitionsfile default below, be sure to include the __star__ 
-# replacement in the filename, or you will get one file for only one disk, 
+# system (e.g. partitions.sda.txt, partitions.sdb.txt). If you change
+# the partitionsfile default below, be sure to include the __star__
+# replacement in the filename, or you will get one file for only one disk,
 # the others being written to the same file, and then overwritten by the next.
 # partitionsfile = /var/backups/partitions.__star__.txt
 # dosfdisk = yes
@@ -66,8 +66,8 @@
 # luksheaders = no
 
 # NOTE: the __star__ below will be replaced by the Luks partitions found on the
-# system (e.g. luksheader.sda2.bin, luksheader.sdb3.bin). If you change 
-# the luksheadersfile default below, be sure to include the __star__ 
+# system (e.g. luksheader.sda2.bin, luksheader.sdb3.bin). If you change
+# the luksheadersfile default below, be sure to include the __star__
 # replacement in the filename, or you will get one file for only one partition,
 # the others being written to the same file, and then overwritten by the next.
 # luksheadersfile = /var/backups/luksheader.__star__.bin
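The restore commands quoted in the comments above pair up as follows. A sketch only: the dpkg selections file and partition table names are the handler's documented defaults, while the debconf file name and the target disk are illustrative and must match your own system:

    # restore package state; per the comments above, restore debconf answers first
    debconf-set-selections < /var/backups/debconfsel.txt
    dpkg --set-selections < /var/backups/dpkg-selections.txt
    apt-get -u dselect-upgrade

    # re-create the partition layout on a same-sized replacement disk
    # (MAKE SURE YOU PARTITION THE CORRECT DISK!!!)
    sfdisk /dev/sdb < /var/backups/partitions.sdb.txt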
diff --git a/examples/example.trac b/examples/example.trac
index 645998eadc04fde186a00deb20a1ebc1a7aed90c..77d0e764c9c61c18b261d2370466f80c038b7899 100644
--- a/examples/example.trac
+++ b/examples/example.trac
@@ -1,6 +1,6 @@
 ##
 ## Perform backups of trac environment
-## 
+##
 ## REQUIRES: apt-get install trac
 ##
 ## This file can be empty, the defaults are usually good.
diff --git a/handlers/dup.helper.in b/handlers/dup.helper.in
index 775da64a1eae546031f84be147ffcef0b8f10ff3..4a68927fba1a2b5c11d63a1bed28f9f33c59d068 100644
--- a/handlers/dup.helper.in
+++ b/handlers/dup.helper.in
@@ -356,7 +356,7 @@ incremental = $dup_incremental
 
 # how many days of incremental backups before doing a full backup again ;
 # default is 30 days (one can also use the time format of duplicity).
-# if increments = keep, never automatically perform a new full backup ; 
+# if increments = keep, never automatically perform a new full backup ;
 # only perform incremental backups.
 #increments = 30
 #increments = keep
@@ -398,9 +398,9 @@ keepincroffulls = $dup_keepincroffulls
 ## cfauthurl = YOUR_CF_AUTH_URL
 ##
 ## Default:
-# cfusername = 
-# cfapikey = 
-# cfauthurl = 
+# cfusername =
+# cfapikey =
+# cfauthurl =
 
 ## Dropbox requires a valid authentication token. To obtain one, you will need
 ## to create a Dropbox API application at https://www.dropbox.com/developers/apps/create.
@@ -413,7 +413,7 @@ keepincroffulls = $dup_keepincroffulls
 # dropboxaccesstoken =
 
 # FTP password, needed for backups using desturl = ftp://...
-#ftp_password = 
+#ftp_password =
 
 # bandwith limit, in KB/s ; default is 0, i.e. no limit
 # if using 'desturl' above, 'bandwidthlimit' must not be set
@@ -436,7 +436,7 @@ bandwidthlimit = $dup_bandwidth
 ## warning: requires no space beetween "-o" and "IdentityFile=...".
 ##
 ## Default:
-# sshoptions = 
+# sshoptions =
 sshoptions = $dup_sshoptions
 
 # put the backups under this destination directory
diff --git a/handlers/mysql.in b/handlers/mysql.in
index f57b315f377690e34ef65d1295fed8928863bb78..bfc66bfa19792c59a19e84b03f28577b4bfb091d 100644
--- a/handlers/mysql.in
+++ b/handlers/mysql.in
@@ -63,7 +63,7 @@ then
    home=`getent passwd "root" | @AWK@ -F: '{print $6}'`
 
    [ -d $home ] || fatal "Can't find root's home directory ($home)."
-   
+
    mycnf="$home/.my.cnf"
 
    workcnf="$mycnf"
@@ -75,7 +75,7 @@ then
       debug "mv $workcnf $tmpcnf"
       mv $workcnf $tmpcnf
    fi
-   
+
    oldmask=`umask`
    umask 077
    cat > $workcnf <<EOF
diff --git a/handlers/pgsql.helper.in b/handlers/pgsql.helper.in
index 89a8c77a0ae7e81e3e0b68d24254a879765881b5..0b9ef13db3c6a863a433d45512dae8986cd51448 100644
--- a/handlers/pgsql.helper.in
+++ b/handlers/pgsql.helper.in
@@ -66,7 +66,7 @@ pgsql_wizard() {
          *) pgsql_format = "format = plain";;
       esac
    fi
-   
+
    # write config file
    get_next_filename $configdirectory/20.pgsql
@@ -91,8 +91,8 @@ $pgsql_compress
 # format = < plain | tar | custom > (default = plain)
 #   plain - Output a plain-text SQL script file with the extension .sql.
 #           When dumping all databases, a single file is created via pg_dumpall.
-#   tar - Output a tar archive suitable for input into pg_restore. More 
-#         flexible than plain and can be manipulated by standard Unix tools 
+#   tar - Output a tar archive suitable for input into pg_restore. More
+#         flexible than plain and can be manipulated by standard Unix tools
 #         such as tar. Creates a globals.sql file and an archive per database.
 #   custom - Output a custom PostgreSQL pg_restore archive. This is the most
 #            flexible format allowing selective import and reordering of database
diff --git a/handlers/rsync.in b/handlers/rsync.in
index 571b6cecba99b096ad5ae887fdf0a00fcf454d32..636858f0d0bd6c125b5a61ab1099922bb263ab2f 100644
--- a/handlers/rsync.in
+++ b/handlers/rsync.in
@@ -101,18 +101,18 @@
 # function definitions
 
 function eval_config {
-  
+
   # system section
-  
+
   setsection system
   getconf rm rm
   getconf cp cp
   getconf touch touch
   getconf mv mv
   getconf fsck fsck
-  
+
   # general section
-  
+
   setsection general
   getconf log /var/log/backup/rsync.log
   getconf partition
@@ -130,14 +130,14 @@ function eval_config {
   getconf enable_mv_timestamp_bug no
   getconf tmp /tmp
   getconf multiconnection no
-  
+
   # source section
-  
+
   setsection source
   getconf from local
   getconf rsync $RSYNC
   getconf rsync_options "-av --delete --recursive"
-  
+
   if [ "$from" == "remote" ]; then
      getconf testconnect no
      getconf protocol ssh
@@ -156,7 +156,7 @@ function eval_config {
      getconf remote_rsync rsync
      getconf id_file /root/.ssh/id_rsa
   fi
-  
+
   getconf batch no
 
   if [ "$batch" == "yes" ]; then
@@ -172,13 +172,13 @@ function eval_config {
   getconf exclude
   getconf numericids 0
   getconf compress 0
-  
+
   # dest section
-  
+
   setsection dest
   getconf dest local
   getconf fakesuper no
-  
+
   if [ "$dest" == "remote" ]; then
      getconf testconnect no
     getconf protocol ssh
@@ -197,7 +197,7 @@ function eval_config {
      getconf remote_rsync rsync
      getconf id_file /root/.ssh/id_rsa
   fi
-  
+
   getconf batch no
 
   if [ "$batch" != "yes" ]; then
@@ -212,9 +212,9 @@ function eval_config {
 
   getconf numericids 0
   getconf compress 0
-  
+
   # services section
-  
+
   setsection services
   getconf initscripts /etc/init.d
   getconf service
@@ -231,7 +231,7 @@ function eval_config {
 
   backupdir="$mountpoint/$backupdir"
 
-  if [ "$dest" == "local" ] && [ ! -d "$backupdir" ]; then 
+  if [ "$dest" == "local" ] && [ ! -d "$backupdir" ]; then
      fatal "Backupdir $backupdir does not exist"
   fi
 
@@ -247,9 +247,9 @@ function eval_config {
      fi
   fi
 
-  if [ ! -z "$nicelevel" ]; then 
+  if [ ! -z "$nicelevel" ]; then
      nice="nice -n $nicelevel"
-  else 
+  else
      nice=""
   fi
 
@@ -419,7 +419,7 @@ function rotate_long {
          warning "Warning: metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
         continue 1
       fi
-      
+
       # Rotate the current list of backups, if we can.
       oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
       [ "$oldest" == "" ] && oldest=0
@@ -488,7 +488,7 @@ function rotate_long {
       max=$((keep${rottype}+1))
       dir="$backuproot/$rottype"
       oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
-      [ "$oldest" == "" ] && oldest=0 
+      [ "$oldest" == "" ] && oldest=0
       # if we've rotated the last backup off the stack, remove it.
       for (( i=$oldest; i >= $max; i-- )); do
         if [ -d $dir.$i ]; then
@@ -546,7 +546,7 @@ function rotate_long_remote {
             echo "Warning: metadata does not exist for \$dir.1. This backup may be only partially completed. Skipping rotation."
            continue 1
          fi
-         
+
          # Rotate the current list of backups, if we can.
          oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
          [ "\$oldest" == "" ] && oldest=0
@@ -615,7 +615,7 @@ function rotate_long_remote {
          max=\$((keep\${rottype}+1))
          dir="$backuproot/\$rottype"
          oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
-         [ "\$oldest" == "" ] && oldest=0 
+         [ "\$oldest" == "" ] && oldest=0
          # if we've rotated the last backup off the stack, remove it.
          for (( i=\$oldest; i >= \$max; i-- )); do
            if [ -d \$dir.\$i ]; then
@@ -847,7 +847,7 @@ function set_orig {
 
 }
 
-function set_dest { 
+function set_dest {
 
    if [ "$dest" == "local" ]; then
       dest_path="$backupdir/$SECTION/$suffix/"
@@ -981,7 +981,7 @@ function set_rsync_options {
    if [ ! -z "$bandwidthlimit" ]; then
      rsync_options="$rsync_options --bwlimit=$bandwidthlimit"
    fi
-   
+
    if [ "$fakesuper" == "yes" ]; then
      remote_rsync="$remote_rsync --fake-super"
    fi
diff --git a/handlers/sys.in b/handlers/sys.in
index 375a86460fa4efa7e9b8430f9a93de5769f3386e..671fcd95e13919849d72da615723d5c68161aded 100644
--- a/handlers/sys.in
+++ b/handlers/sys.in
@@ -554,24 +554,39 @@ fi
 
 ## PARTITIONS #############################
 
-# here we use sfdisk to dump a listing of all the partitions.
-# these files can be used to directly partition a disk of the same size.
+if [ "$partitions" == "yes" ] || [ "$luksheaders" == "yes" ] || [ "$mbr" == "yes" ]; then
+   # get a list of block devices on the system
+   debug "LC_ALL=C $LSBLK --output NAME,TYPE --list --paths 2>/dev/null | grep \"disk$\" | grep -v '^/dev/zram' | @AWK@ '{print \$1}'"
+   devices=`LC_ALL=C $LSBLK --output NAME,TYPE --list --paths 2>/dev/null | grep "disk$" | grep -v '^/dev/zram' | @AWK@ '{print $1}'`
+
+   if [ "$devices" == "" ]; then
+      warning "Unable to find any block devices on this system."
+   else
+      info "$(echo "Devices found: $devices" | tr "\n" " ")"
+   fi
+
+   # get a list of block device partitions on the system
+   debug "LC_ALL=C $SFDISK -l 2>/dev/null | grep \"^/dev\" | @AWK@ '{print \$1}'"
+   devparts=`LC_ALL=C $SFDISK -l 2>/dev/null | grep "^/dev" | @AWK@ '{print $1}'`
+
+   if [ "$devparts" == "" ]; then
+      info "No partitions found on this system."
+   else
+      info "$(echo "Partitions found: $devparts" | tr "\n" " ")"
+   fi
+fi
 
 if [ "$partitions" == "yes" ]; then
    if [ "$dosfdisk" == "yes" ]; then
-      devices=`LC_ALL=C $LSBLK --output NAME,TYPE --list --paths 2>/dev/null | grep "disk$" | grep -v '^/dev/zram' | @AWK@ '{print $1}'`
-      partitions=`LC_ALL=C $SFDISK -l 2>/dev/null |grep "^/dev" | @AWK@ '{print $1}'`
-      if [ "$devices" == "" ]; then
-         warning "No harddisks found"
-      fi
       for dev in $devices; do
-         debug "$SFDISK will try to backup partition tables for device $dev"
         [ -b $dev ] || continue
-         echo "${partitions}" | grep -q "${dev}"
-         if [ $? -ne 0 ] ; then
+         if ! echo "${devparts}" | grep -q "${dev}"; then
            info "The device $dev does not appear to have any partitions"
            continue
         fi
+         # here we use sfdisk to dump a listing of all the partitions.
+         # these files can be used to directly partition a disk of the same size.
+         debug "$SFDISK will try to backup partition tables for device $dev"
         label=${dev#/dev/}
         label=${label//\//-}
         outputfile=${partitionsfile//__star__/$label}
@@ -582,60 +597,45 @@ if [ "$partitions" == "yes" ]; then
       fi
    done
    fi
-   if [ "$dohwinfo" == "yes" ]; then
-      debug "Using $HWINFO to get all available disk information"
-      echo -e "\n\n====================== $disk ======================\n" >> $hardwarefile
-      $HWINFO --disk >> $hardwarefile
-   fi
 fi
 
 if [ "$luksheaders" == "yes" ]; then
-   devices=`LC_ALL=C $LSBLK --output NAME,TYPE --list --paths 2>/dev/null | grep "disk$" | grep -v '^/dev/zram' | @AWK@ '{print $1}'`
-   if [ "$devices" == "" ]; then
-      warning "No harddisks found"
-   fi
-   partitions=`LC_ALL=C $SFDISK -l 2>/dev/null |grep "^/dev" | @AWK@ '{print $1}'`
-   [ -n "$partitions" ] || warning "No partitions found"
-   targetdevices=""
-   for dev in $devices $partitions; do
+   for dev in $devices $devparts; do
      [ -b $dev ] || continue
-      debug "$CRYPTSETUP isLuks $dev"
-      $CRYPTSETUP isLuks $dev
-      [ $? -eq 0 ] && targetdevices="$targetdevices $dev"
-   done
-   for dev in $targetdevices; do
-      label=${dev#/dev/}
-      label=${label//\//-}
-      outputfile=${luksheadersfile//__star__/$label}
-      debug "Let us backup the LUKS header of $dev"
-      debug "$CRYPTSETUP luksHeaderBackup \"${dev}\" --header-backup-file \"${outputfile}\""
-      output=`$CRYPTSETUP luksHeaderBackup "${dev}" --header-backup-file "${outputfile}" 2>&1`
-      exit_code=$?
-      if [ $exit_code -eq 0 ]; then
-         debug "$output"
-         info "The LUKS header of $dev was saved to $outputfile."
-      else
-         debug "$output"
-         fatal "The LUKS header of $dev could not be saved."
+      if $CRYPTSETUP isLuks $dev; then
+         label=${dev#/dev/}
+         label=${label//\//-}
+         outputfile=${luksheadersfile//__star__/$label}
+         debug "$CRYPTSETUP will try to backup the LUKS header for device $dev"
+         debug "$CRYPTSETUP luksHeaderBackup \"${dev}\" --header-backup-file \"${outputfile}\""
+         output=`$CRYPTSETUP luksHeaderBackup "${dev}" --header-backup-file "${outputfile}" 2>&1`
+         exit_code=$?
+         if [ $exit_code -eq 0 ]; then
+            debug "$output"
+            info "The LUKS header of $dev was saved to $outputfile."
+         else
+            debug "$output"
+            fatal "The LUKS header of $dev could not be saved."
+         fi
       fi
    done
 fi
 
 if [ "$mbr" == "yes" ]; then
-   devices=`LC_ALL=C $SFDISK -l 2>/dev/null | grep "^Disk /dev" | @AWK@ '{print $2}' | cut -d: -f1`
-   if [ "$devices" == "" ]; then
-      warning "No harddisks found"
-   fi
    for dev in $devices; do
-      debug "Will try to backup MBR tables for device $dev"
      [ -b $dev ] || continue
-      label=${dev#/dev/}
-      label=${label//\//-}
-      outputfile=${mbrfile//__star__/$label}
-      debug "$DD if=$dev of=$outputfile bs=512 count=1 2>/dev/null"
-      $DD if=$dev of=$outputfile bs=512 count=1 2>/dev/null
-      if [ $? -ne 0 ]; then
-         warning "The MBR for $dev could not be saved."
+      if $SFDISK -d $dev 2>/dev/null | head -n1 | grep "label: dos"; then
+         debug "$SFDISK will try to backup MBR tables for device $dev"
+         label=${dev#/dev/}
+         label=${label//\//-}
+         outputfile=${mbrfile//__star__/$label}
+         debug "$DD if=$dev of=$outputfile bs=512 count=1 2>/dev/null"
+         $DD if=$dev of=$outputfile bs=512 count=1 2>/dev/null
+         if [ $? -ne 0 ]; then
+            warning "The MBR for $dev could not be saved."
+         fi
+      else
+         info "The device $dev does not appear to contain an MBR."
      fi
    done
 fi
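The rewritten handler above derives one shared device list and then filters per task; only disks whose sfdisk dump starts with a DOS disklabel get the 512-byte copy, so GPT-only disks are skipped instead of producing meaningless MBR files (the improvement tracked as #11303). A standalone approximation of the MBR path, assuming util-linux lsblk/sfdisk, root privileges, and an illustrative output path:

    devices=$(LC_ALL=C lsblk --output NAME,TYPE --list --paths 2>/dev/null \
       | awk '$2 == "disk" && $1 !~ /^\/dev\/zram/ {print $1}')
    for dev in $devices; do
       [ -b "$dev" ] || continue
       if sfdisk -d "$dev" 2>/dev/null | head -n1 | grep -q 'label: dos'; then
          label=${dev#/dev/}; label=${label//\//-}
          # the MBR is the first 512 bytes of the disk
          dd if="$dev" of="/var/backups/mbr.${label}.bin" bs=512 count=1 2>/dev/null
       fi
    done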
diff --git a/lib/Makefile.am b/lib/Makefile.am
index 2180adc6e43e0fc8a5bbe08dfff1c1b9bffcc226..1d989d66ef12fc26cf55b9376e7232a14c892248 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -11,7 +11,7 @@ edit = sed \
 	-e "s,@AWK\@,$(AWK),g" \
 	-e "s,@SED\@,$(SED),g" \
 	-e "s,@MKTEMP\@,$(MKTEMP),g" \
-	-e "s,@libdir\@,$(pkglibdir),g" 
+	-e "s,@libdir\@,$(pkglibdir),g"
 
 easydialog: $(srcdir)/easydialog.in
 	rm -f easydialog
diff --git a/lib/Makefile.in b/lib/Makefile.in
index e87df6cc7676083313ba80feb5202a2e884a12b5..028d841e646f9117d2ba34a5a9a8b8f3ebb9904a 100644
--- a/lib/Makefile.in
+++ b/lib/Makefile.in
@@ -238,7 +238,7 @@ edit = sed \
 	-e "s,@AWK\@,$(AWK),g" \
 	-e "s,@SED\@,$(SED),g" \
 	-e "s,@MKTEMP\@,$(MKTEMP),g" \
-	-e "s,@libdir\@,$(pkglibdir),g" 
+	-e "s,@libdir\@,$(pkglibdir),g"
 
 all: all-am
 
diff --git a/lib/parseini.in b/lib/parseini.in
index 2f2124c49dd6137532302a7d11085e1d6c172bd3..7ddde796392129505304d14118a13e2aba225ebc 100644
--- a/lib/parseini.in
+++ b/lib/parseini.in
@@ -1,5 +1,5 @@
 # -*- mode: awk; indent-tabs-mode: nil; -*-
-# 
+#
 # parseini --- parses 'ini' style configuration files.
 #
 # Usage:
@@ -8,123 +8,123 @@
 # if section is an empty string, then we use the default section
 #
 # example ini file:
-# 
+#
 # fruit = apple
 # fruit = pear
 # multiline = this is a multiline \
 #             parameter
 #
 # # this is a comment
-# [colors] 
+# [colors]
 # red = yes
 # green = no
 # blue = maybe
 #
-# [ocean] 
-# fish = red 
+# [ocean]
+# fish = red
 # fish = blue
-# 
+#
 # example usage:
-# > awk -f parseini S=ocean P=fish testfile.ini 
-# would return: 
+# > awk -f parseini S=ocean P=fish testfile.ini
+# would return:
 # red
 # blue
 #
- 
-BEGIN { 
-    readlines = 1 
-    implied = 1 
-} 
+
+BEGIN {
+    readlines = 1
+    implied = 1
+}
 
 # remove lines starting with #, but not #!
-/^#[^!]/ {next} 
+/^#[^!]/ {next}
 
 # skip blank
-/^[ \r\t]*$/ {next} 
+/^[ \r\t]*$/ {next}
 
 # we want to read the lines of the matched section
 # and disable for other sections
-/^\[.+\][ \r\t]*$/ { 
-    continueline = 0 
-    if (S && implied) { 
-        nline = 0 
-        implied = 0 
-    } 
-    if (S && match($0, "^\\[" S "\\][ \n]*")) { 
+/^\[.+\][ \r\t]*$/ {
+    continueline = 0
+    if (S && implied) {
+        nline = 0
+        implied = 0
+    }
+    if (S && match($0, "^\\[" S "\\][ \n]*")) {
         # we found the section, so start reading.
-        readlines = 1 
-    } 
-    else { 
+        readlines = 1
+    }
+    else {
         # no section, so stop reading lines
-        if (readlines) readlines = 0 
-    } 
-    next 
-} 
+        if (readlines) readlines = 0
+    }
+    next
+}
 
 # when reading, store lines.
-{ 
-    if (!readlines) next 
-    line[nline++] = $0 
-    if ($0 ~ /\\[ \r\t]*$/) 
-        continueline = 1 
-    else 
-        continueline = 0 
-} 
+{
+    if (!readlines) next
+    line[nline++] = $0
+    if ($0 ~ /\\[ \r\t]*$/)
+        continueline = 1
+    else
+        continueline = 0
+}
 
 # process the read lines lines, matching parameters
-END { 
+END {
     # if section is set but implied is still true
     # then we never found the section, so use everything
-    if (S && implied) { 
-        nline = 0 
-    } 
+    if (S && implied) {
+        nline = 0
+    }
+
+    # if have P then find P in read lines and get values
+    if (P) {
+        MATCH = "^[ \r\t]*" P "[ \r\t]*="
+        continueline = 0
+        for (x = 0; x < nline; ++x) {
+            v = line[x]
+            if (continueline) {
+                sub(/[ \r\t]+$/, "", v)
+                if (v ~ /\\$/) {
+                    v = substr(v, 1, length(v)-1)
+                    sub(/[ \r\t]+$/, "", v)
+                }
+                if (v) value[nvalue++] = v
+            }
+            else if (v ~ MATCH) {
+                sub(MATCH, "", v)
+                sub(/^[ \r\t]+/, "", v)
+                sub(/[ \r\t]+$/, "", v)
+                if (v ~ /\\$/) {
+                    continueline = 1
+                    v = substr(v, 1, length(v)-1)
+                    sub(/[ \r\t]+$/, "", v)
+                }
+                if (v) value[nvalue++] = v
+            }
+        }
+        # copy parameter definition to output array
+        nline = nvalue
+        for (x = 0; x < nvalue; ++x)
+            line[x] = value[x]
+    }
+
+    # trim all leading & trailing whitespace;
+    # except for leading whitespace in continuation lines,
 
-    # if have P then find P in read lines and get values
-    if (P) { 
-        MATCH = "^[ \r\t]*" P "[ \r\t]*=" 
-        continueline = 0 
-        for (x = 0; x < nline; ++x) { 
-            v = line[x] 
-            if (continueline) { 
-                sub(/[ \r\t]+$/, "", v) 
-                if (v ~ /\\$/) { 
-                    v = substr(v, 1, length(v)-1) 
-                    sub(/[ \r\t]+$/, "", v) 
-                } 
-                if (v) value[nvalue++] = v 
-            } 
-            else if (v ~ MATCH) { 
-                sub(MATCH, "", v) 
-                sub(/^[ \r\t]+/, "", v) 
-                sub(/[ \r\t]+$/, "", v) 
-                if (v ~ /\\$/) { 
-                    continueline = 1 
-                    v = substr(v, 1, length(v)-1) 
-                    sub(/[ \r\t]+$/, "", v) 
-                } 
-                if (v) value[nvalue++] = v 
-            } 
-        } 
-        # copy parameter definition to output array
-        nline = nvalue 
-        for (x = 0; x < nvalue; ++x) 
-            line[x] = value[x] 
-    } 
+    for (x = 0; x < nline; ++x) {
+        sub(/^[ \r\t]+/, "", line[x])
+        sub(/[ \r\t]+$/, "", line[x])
+    }
 
-    # trim all leading & trailing whitespace;
-    # except for leading whitespace in continuation lines,
-    
-    for (x = 0; x < nline; ++x) { 
-        sub(/^[ \r\t]+/, "", line[x]) 
-        sub(/[ \r\t]+$/, "", line[x]) 
-    } 
-    # output the final result
-    for (x = 0; x < nline; ++x) 
-        print line[x] 
+    # output the final result
+    for (x = 0; x < nline; ++x)
+        print line[x]
 
-    if (nline) exit 0 
-    else exit 1 
+    if (nline) exit 0
+    else exit 1
 }
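The header comments of parseini double as a test case: with the example ini saved as testfile.ini, the matched values print one per line, and the exit status (from the END block) reflects whether anything matched:

    $ awk -f parseini S=ocean P=fish testfile.ini
    red
    blue
    $ awk -f parseini S=ocean P=shark testfile.ini; echo $?
    1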
diff --git a/man/backupninja.conf.5 b/man/backupninja.conf.5
index 72de35af9532a7d7b96b4eb92bb9adec8356076f..ff6b055330097a4a24ff0fd0c4606318f3965324 100644
--- a/man/backupninja.conf.5
+++ b/man/backupninja.conf.5
@@ -44,16 +44,40 @@ How verbose to make the logs.
 .TP
 .B reportemail
-Send a summary of the backup status to this email address
+Send a summary of the backup status to this email address.
 
 .TP
 .B reportsuccess
 If set to 'yes', a report email will be generated even if all modules reported success.
 
+.TP
+.B reportinfo
+If set to 'yes', info messages from handlers will be included in the report email.
+
 .TP
 .B reportwarning
 If set to 'yes', a report email will be generated even if there was no error.
 
+.TP
+.B reportspace
+If set to 'yes', disk space usage will be included in the backup email report.
+
+.TP
+.B reporthost
+Where to rsync the backupninja.log to be aggregated in a ninjareport.
+
+.TP
+.B reportuser
+What user to connect to reporthost to sync the backupninja.log.
+
+.TP
+.B reportdirectory
+Where on the reporthost should the report go.
+
+.TP
+.B reportwrap
+Number of columns the email report body should wrap to.
+
 .TP
 .B logfile
 The path of the logfile.
@@ -62,9 +86,13 @@ The path of the logfile.
 .B configdirectory
 The directory where all the backup action configuration files live.
 
+.TP
+.B admingroup
+Administration user group that is allowed to read/write configuration files in \fBconfigdirectory\fR.
+
 .TP
 .B scriptdirectory
-Where backupninja handler scripts are found
+Where backupninja handler scripts are found.
 
 .TP
 .B usecolors
@@ -105,12 +133,24 @@ reportemail = root
 .br
 reportsuccess = yes
 .br
-reportwarning = yes
+reportinfo = no
+.br
+reportspace = no
+.br
+reporthost =
+.br
+reportuser = ninja
+.br
+reportdirectory = /var/lib/backupninja/reports
+.br
+reportwrap = 1000
 .br
 logfile = /var/log/backupninja.log
 .br
 configdirectory = /etc/backup.d
 .br
+admingroup = root
+.br
 scriptdirectory = /usr/share/backupninja
 .br
 usecolors = yes
diff --git a/src/backupninja.in b/src/backupninja.in
index 5157efc04267d5d00a6a4fc96495f3c4cf67b639..10a8c6632b453f1cb48b8414b8c21243f09dcaf7 100755
--- a/src/backupninja.in
+++ b/src/backupninja.in
@@ -376,8 +376,8 @@ function process_action() {
       done
    )
    retcode=${PIPESTATUS[0]}
-   debug "handler returned exit code $retcode"
    echo_debug_msg=0
+   debug "handler returned exit code $retcode"
 
 else
    # a backup is probably ongoing already, so display an error message
@@ -543,6 +543,7 @@ getconf RSYNC /usr/bin/rsync
 getconf DSYNC /usr/bin/dsync
 getconf DOVEADM /usr/bin/doveadm
 getconf admingroup root
+getconf reportwrap 1000
 
 if [ ! -d "$configdirectory" ]; then
    echo "Configuration directory '$configdirectory' not found."
@@ -606,34 +607,41 @@ if [ $actions_run == 0 ]; then doit=0
 elif [ "$reportemail" == "" ]; then doit=0
 elif [ $fatals != 0 ]; then doit=1
 elif [ $errors != 0 ]; then doit=1
+elif [ $halts != 0 ]; then doit=1
 elif [ "$reportsuccess" == "yes" ]; then doit=1
 elif [ "$reportwarning" == "yes" -a $warnings != 0 ]; then doit=1
 else doit=0
 fi
 
 if [ $doit == 1 ]; then
-   debug "send report to $reportemail"
-   hostname=`hostname`
-   [ $warnings == 0 ] || subject="WARNING"
-   [ $errors == 0 ] || subject="ERROR"
-   [ $fatals == 0 ] || subject="FAILED"
-
-   {
-      for ((i=0; i < ${#messages[@]} ; i++)); do
-         echo ${messages[$i]}
-      done
-      echo -e "$errormsg"
-      if [ "$reportspace" == "yes" ]; then
-         previous=""
-         for i in $(ls "$configdirectory"); do
-            backuploc=$(grep ^directory "$configdirectory"/"$i" | @AWK@ '{print $3}')
-            if [ "$backuploc" != "$previous" -a -n "$backuploc" -a -d "$backuploc" ]; then
-               df -h "$backuploc"
-               previous="$backuploc"
-            fi
+   if [ -x "$(which mail 2>/dev/null)" ]; then
+      debug "send report to $reportemail"
+      hostname=`hostname`
+      [ $warnings == 0 ] || subject="WARNING"
+      [ $errors == 0 ] || subject="ERROR"
+      [ $fatals == 0 ] || subject="FAILED"
+      [ $halts == 0 ] || subject="HALTED"
+
+      {
+         for ((i=0; i < ${#messages[@]} ; i++)); do
+            echo ${messages[$i]}
             done
-      fi
-   } | mail -s "backupninja: $hostname $subject" $reportemail
+         echo -e "$errormsg"
+         if [ "$reportspace" == "yes" ]; then
+            previous=""
+            for i in $(ls "$configdirectory"); do
+               backuploc=$(grep ^directory "$configdirectory"/"$i" | @AWK@ '{print $3}')
+               if [ "$backuploc" != "$previous" -a -n "$backuploc" -a -d "$backuploc" ]; then
+                  df -h "$backuploc"
+                  previous="$backuploc"
+               fi
+            done
+         fi
+      } | fold -s -w "$reportwrap" | mail -s "backupninja: $hostname $subject" $reportemail
+   else
+      error "Unable to locate mail executable, email report not sent!"
+      let "errors += 1"
+   fi
 fi
 
 if [ $actions_run != 0 ]; then
@@ -644,10 +652,29 @@ if [ $actions_run != 0 ]; then
 fi
 
 if [ -n "$reporthost" ]; then
-   debug "send $logfile to $reportuser@$reporthost:$reportdirectory"
-   rsync -qt $logfile $reportuser@$reporthost:$reportdirectory
+   if [ -z "$reportuser" ] || [ -z "$reportdirectory" ]; then
+      error "Unable to send report, reportuser and reportdirectory must be specified."
+      let "errors += 1"
+   elif [ ! -x "$(which rsync 2>/dev/null)" ]; then
+      error "Unable to locate rsync executable, report could not be sent to ${reporthost}."
+      let "errors += 1"
+   else
+      info "Sending $logfile to $reportuser@$reporthost:$reportdirectory"
+      execstr="rsync -qt $logfile $reportuser@$reporthost:$reportdirectory"
+      debug $execstr
+      output=$(eval $execstr 2>&1)
+      ret=$?
+      if [ $ret -ne 0 ]; then
+         error $output
+         error "An error was encountered attempting to send report to ${reporthost}."
+         let "errors += 1"
+      fi
+   fi
 fi
 
+# unset exported envvars
+unset BACKUPNINJA_DEBUG
+
 # return exit code
 [ $halts == 0 ] || exit 2
 [ $fatals == 0 ] || exit 2
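For reference, the hardened reporthost path above reduces to this shape when run by hand. A sketch only: the host and paths are hypothetical, and backupninja itself builds the command into execstr, logs it with debug, and bumps its error counter on every failure branch:

    logfile=/var/log/backupninja.log
    reportuser=ninja reporthost=loghost.example.org reportdirectory=/var/lib/backupninja/reports

    if ! command -v rsync >/dev/null 2>&1; then
       echo "Unable to locate rsync executable" >&2
    elif ! output=$(rsync -qt "$logfile" "$reportuser@$reporthost:$reportdirectory" 2>&1); then
       echo "$output" >&2    # surface rsync's own message, as the handler does
       echo "An error was encountered attempting to send report to $reporthost" >&2
    fi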