diff --git a/backup-everything.sh b/backup-everything.sh
index bfb31a4..955dd9e 100755
--- a/backup-everything.sh
+++ b/backup-everything.sh
@@ -1,722 +1,722 @@
#!/bin/bash
###
# Micro script to backup some stuff
#
# IMPORTANT: do not edit this file,
# see 'backup-instructions.conf' instead!
#
# This file defines some functions and then it executes
# your 'backup-instructions.conf'
#
# Author: 2020, 2021 Valerio Bozzolan
# License: MIT
##

# try to proceed even in case of errors (so as not to skip any backup)
# set -e

# current directory
MYDIR="$(dirname "$(realpath "$0")")"

# load useful stuff
. "$MYDIR"/bootstrap.sh

# no instructions no party
if [ ! -f "$INSTRUCTIONS" ]; then
	echo "missing instructions expected in $INSTRUCTIONS"
	exit 1
fi

###
# Create a directory with safe permissions if it does not exist
#
# @param string path Pathname
#
write_safe_dir_if_unexisting() {
	local path="$1"

	# try to create the directory if it does not exist
	if [ ! -d "$path" ]; then
		warn "creating missing $path with 750"
		mkdir --parents "$path"
		chmod 750 "$path"
	fi

	# it must exist now
	require_existing_dir "$path"
}

###
# Create the parent directory of a filename with safe permissions if it does not exist
#
# @param string path Pathname to a filename
#
write_safe_basedir_if_unexisting() {

	# first function argument
	local path="$1"

	# extract just the parent directory
	local basepath
	basepath=$(dirname "$path")

	# create that directory
	write_safe_dir_if_unexisting "$basepath"
}

###
# Require an existing directory or die
#
# @param string Directory path
#
require_existing_dir() {
	if [ ! -d "$1" ]; then
		error "nonexistent directory $1"
		exit 1
	fi
}

# create these pathnames if they do not exist
write_safe_dir_if_unexisting "$BASE"
write_safe_dir_if_unexisting "$DAILY"
write_safe_dir_if_unexisting "$DAILY_FILES"
write_safe_dir_if_unexisting "$DAILY_DATABASES"

# create or clean the last log file
if [ "$WRITELOG" = 1 ]; then
	cat /dev/null > "$DAILY_LASTLOG"
fi

#
# Dump and compress a database by its name.
#
# @param db string Database name
# @param spaces string Cosmetic spaces
#
backup_database() {
	local db="$1"
	local spaces="$2"
	local path="$DAILY_DATABASES"/"$db".sql.gz

	# Log the most appropriate message.
	if [ -n "${TABLES_TO_BE_SKIPPED["$db"]}" ]; then
		# The user can ignore some tables. Better to show that information explicitly.
		log "$spaces""dumping database $db (skipping tables: ${TABLES_TO_BE_SKIPPED["$db"]})"
	else
		log "$spaces""dumping database $db"
	fi

	# no database no party
	if ! $MYSQL "$db" -e exit > /dev/null 2>&1; then
		warn "$spaces skip nonexistent database"
		return
	fi

	#
	# MySQL dump with table skipper
	#
	# This may generate something like:
	# 'mysqldump --ignore-table=DB.table1 --ignore-table=DB.table2' etc.
	local mysqldump="$MYSQLDUMP"
	for table in ${TABLES_TO_BE_SKIPPED["$db"]}; do
		mysqldump+=" --ignore-table=$db.$table"
	done

	# Perform the operation only if porcelain mode is disabled.
	if [ "$PORCELAIN" = 1 ]; then
		# Debug the dump without doing it.
		log "$spaces [PORCELAIN] $mysqldump $db > $path"
	else
		# Perform the dump.
		$mysqldump "$db" | gzip > "$path"
	fi
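
	# For illustration, assuming a hypothetical instruction like:
	#
	#   skip_database_table_data "myblog" "wp_statistics"
	#
	# the dump command built above becomes roughly:
	#
	#   mysqldump --routines --triggers --ignore-table=myblog.wp_statistics myblog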

	# If we ignored some tables, let's also dump the schema of those tables, separately.
	# So the backup ignores only the data, not the schema, and the tables are easy to recover.
	# This is just an extra 'mysqldump' with '--no-data'.
	# We also use '--skip-dump-date' since the schema may never change,
	# and then the dump file should stay (binary) the same.
	# So, without dates, we save bytes on your backup storage, and also some transfer bandwidth.
	for table in ${TABLES_TO_BE_SKIPPED["$db"]}; do
		local path_dump_nodata="$DAILY_DATABASES"/"$db"."$table".nodata.sql.gz
		local mysqldump_nodata="$MYSQLDUMP --no-data --skip-dump-date"

		log "$spaces saving schema of table $db.$table"

		# Perform the operation only if porcelain mode is disabled.
		if [ "$PORCELAIN" = 1 ]; then
			log "$spaces [PORCELAIN] $mysqldump_nodata $db $table > $path_dump_nodata"
		else
			$mysqldump_nodata "$db" "$table" | gzip > "$path_dump_nodata"
		fi
	done
}

#
# Dump and compress some databases
#
# @param database... string Database names
#
backup_databases() {
	local something_done=
	for database in "$@"; do
		something_done=1
		backup_database "$database"
	done
	if ! [ "$something_done" = 1 ]; then
		error "Bad usage: backup_databases DATABASE_NAME1 DATABASE_NAME2"
		error "  Have you confused this instruction with this one?"
		error "    backup_every_database"
		exit 1
	fi
}

#
# Dump some databases with a shared prefix
#
# @param prefix string Database prefix
# @param spaces string Cosmetic spaces
#
backup_databases_prefixed() {
	local prefix="$1"
	local spaces="$2"

	log "$spaces""backup all databases prefixed with: $prefix"

	databases=$($MYSQL --skip-column-names --execute='SHOW DATABASES' | grep "$prefix")
	for database in $databases; do
		backup_database "$database" "$spaces "
	done
}

# default databases to be skipped
DATABASE_SKIP_LIST=("^information_schema$" "^performance_schema$")

# database tables to be skipped, indexed by database name
declare -A TABLES_TO_BE_SKIPPED

#
# Call this to skip a database
#
# @param string database Database pattern (Bash regex)
#
# This should be called before calling backup_every_database.
#
skip_database() {
	local db="$1"
	DATABASE_SKIP_LIST+=( "$db" )
}

#
# Skip the data of a specific database table.
#
# We will do our best to dump the table schema anyway.
# This must be called before 'backup_every_database'
# and also before 'backup_database DBNAME' to have effect.
#
# To skip multiple tables, just call this method for each table.
#
# @param string database Database name (exact name)
# @param string table Database table (exact name)
# @return void
#
skip_database_table_data() {
	local db="$1"
	local table="$2"

	# Prepend a space if needed, since the tables are stored as a space-separated string.
	if [ -n "${TABLES_TO_BE_SKIPPED["$db"]}" ]; then
		TABLES_TO_BE_SKIPPED["$db"]+=" "
	fi

	# Add the table to be ignored.
	TABLES_TO_BE_SKIPPED["$db"]+="$table"
}

#
# Backup every single database. That's easy.
#
backup_every_database() {

	# backup every damn database
	databases=$($MYSQL -e 'SHOW DATABASES')
	for database in $databases; do

		local do_backup=1

		# just skip the information_schema and the performance_schema that cannot be locked
		for skip_entry in "${DATABASE_SKIP_LIST[@]}"; do
			if [[ "$database" =~ $skip_entry ]]; then

				# show a cute message
				log "skipping database $database matching the skip list"

				# do not backup this
				do_backup=0

				# do not check other entries
				break
			fi
		done

		# backup if it does not match the above skip entries
		if [ "$do_backup" = 1 ]; then
			backup_database "$database"
		fi
	done
}
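
#
# A usage sketch for 'backup-instructions.conf' (the database and table
# names below are hypothetical examples):
#
#   skip_database "^test_"
#   skip_database_table_data "myblog" "wp_statistics"
#   backup_every_database
#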

#
# Backup a database that is used by a service
#
# The service will be stopped before dumping.
#
# @param service string Systemd unit file
# @param db string Database name
#
backup_service_and_database() {
	local service="$1"
	local db="$2"
	local is_active=

	log "start backup service '$service' with database '$db'"

	# check if the service is running
	if systemctl is-active "$service" > /dev/null; then
		is_active=1
	fi

	# eventually stop the service
	if [ "$is_active" = 1 ]; then
		if [ "$NO_DISSERVICE" = 1 ]; then
			warn " NOT stopping service: $service (to avoid any disservice)"
		else
			log " stopping service: $service"
			if [ "$PORCELAIN" != 1 ]; then
				systemctl stop "$service"
			fi
		fi
	else
		log " service already inactive: $service"
	fi

	# backup the database now that the service is down
	backup_database "$db" " "

	# eventually start the service again
	if [ "$is_active" = 1 ]; then
		if [ "$NO_DISSERVICE" = 1 ]; then
			warn " NOT starting again service: $service (to avoid any disservice)"
		else
			log " starting again service: $service"
			if [ "$PORCELAIN" != 1 ]; then
				systemctl start "$service"
			fi
		fi
	fi
}

#
# Backup some databases used by a service
#
# The service will be stopped before dumping the databases.
#
# @param service string Systemd unit file
# @param db... string Database names
#
backup_service_and_databases() {
	backup_service_and_database "$@"
}

#
# Backup Phabricator
#
# @param path string Phabricator webroot
# @param dbprefix string Database prefix
# @param repos_full_path string Optional full path to the repositories
#
backup_phabricator() {
	local path="$1"
	local dbprefix="$2"
	local repos_full_path="$3"
	local configset="$path"/bin/config

	log "backup Phabricator databases"
	log " set Phabricator read-only mode (and wait some time to make it effective)"
	$configset set cluster.read-only true > /dev/null
	sleep 5

	backup_databases_prefixed "$dbprefix" " "

	# if specified, backup the repositories
	if [ -n "$repos_full_path" ]; then
		log " start Phabricator repositories backup"
		backup_path "$repos_full_path"
	else
		log " skipping repositories (be sure they are under another backup)"
	fi

	log " revert Phabricator read-only mode"
	$configset set cluster.read-only false > /dev/null
}

#
# Backup a directory or a filename
#
# @param path string Pathname to be backed up
# @param identifier string Optional identifier of this pathname
#
backup_path() {
	local path="$1"
	local identifier="$2"

	# create a default identifier
	if [ -z "$identifier" ]; then
		identifier="$path"
	fi

	# destination directory
	# note that the identifier may start with a slash but this is good, just don't care
	dest="$DAILY_FILES/$identifier"

	# tell where the backup will go
	if [ "$path" = "$identifier" ]; then
		log "backup $path"
	else
		log "backup $path with identifier $identifier"
	fi

	# check if the path exists
	if [ -e "$path" ]; then

		# check if it's a directory
		if [ -d "$path" ]; then
			# this is a directory

			# eventually create the destination if it does not exist
			write_safe_dir_if_unexisting "$dest"

			# backup this
			# force the source to end with a slash in order to copy the files inside the directory
			$RSYNC "$path/" "$dest"
		else
			# this is a filename

			# eventually create the base destination if it does not exist
			write_safe_basedir_if_unexisting "$dest"

			# backup this filename
			$RSYNC "$path" "$dest"
		fi
	else
		# no path no party
		warn " path not found: $path"
	fi
}
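
#
# A usage sketch (the pathnames below are hypothetical examples):
#
#   backup_path /etc
#   backup_path /var/www/mysite mysite-webroot
#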
identifier="$path" fi # default lines if [ -z "$lines" ]; then lines="$BACKUP_LAST_LOG_LINES" fi # destination directory # note that the identifier may start with a slash but this is good, just don't care dest="$DAILY_FILES/$identifier" # tell were the backup will go if [ "$path" = "$identifier" ]; then log "backup $path last $lines log lines" else log "backup $path last $lines log lines with identifier $identifier" fi # check if the path exists if [ -e "$path" ]; then # this is a filename # eventually create the base destination if it does not exist write_safe_basedir_if_unexisting "$dest" # backup this file tail -n "$lines" "$path" > "$dest" log "compressing $dest" gzip --force "$dest" else # no path no party warn " path not found: $path" fi } # # Backup some filenames ending with whatever # # @param path[...] string Pathname to be backupped # backup_paths() { # pathnames local paths="$@" # process every pathname for path in $paths; do # backup every single pathname backup_path "$path" done } ## # Validate the backup host arguments # # validate_backup_host_args() { # function parameters local port="$3" # no host no party if [ -z "$1" ] || [ -z "$2" ]; then error "Bad usage: backup_host REMOTE_HOST:REMOTE_PATH IDENTIFIER [PORT]" exit 1 fi # tell what we will do if [ "$port" = "22" ]; then log "backup $1 in $2" else log "backup $1 (:$port) in $2" fi } ## # Backup another host directory via rsync # # @param string Rsync host # @param string Rsync path to a directory # @param string Rsync host port # backup_host_dir() { # function parameters local hostlink="$1" local identifier="$2" local port="$3" # eventually set default rsync port if [ -z "$port" ]; then port=22 fi # validate the arguments validate_backup_host_args "$hostlink" "$identifier" "$port" # destination path local dest="$BASE/$identifier" # create the destination if it does not exist write_safe_dir_if_unexisting "$dest" # backup everything # force the source to end with a slash to just copy the files inside it $RSYNC_REMOTE --rsh="ssh -p $port" "$hostlink/" "$dest" } ## # Backup another host directory via rsync # # @param string Rsync host # @param string Rsync path to a directory # push_path_host() { # function parameters local source="$1" local hostlink="$2" local port="$3" # eventually set default rsync port if [ -z "$port" ]; then port=22 fi # validate the arguments and print message validate_backup_host_args "$source" "$hostlink" "$port" # backup everything # force the source to end with a slash to just copy the files inside it $RSYNC_REMOTE --rsh="ssh -p $port" "$source" "$hostlink" } ## # Push some pathnames into an host # # @param string Rsync host # @param string Rsync port # @param string... Rsync path to some directories # push_host_port_paths() { # function parameters local hostlink="$1" local port="$2" # strip the first two arguments shift shift local path="" # process every pathname while [[ $# -gt 0 ]]; do path="$1" shift # backup every single pathname push_path_host "$path" "$hostlink" "$port" done } ## # Push the local "daily/" directory into a remote host # # @param string Rsync host # @param string Rsync path to a directory # push_daily_directory() { # function parameters local hostlink="$1" local port="$2" push_path_host "$DAILY/" "$hostlink" "$port" } ## # Backup another host file via rsync # # @param string hostlink Rsync source host and pathname e.g. 

##
# Backup another host file via rsync
#
# @param string hostlink Rsync source host and pathname e.g. ravotti94:/tmp/asd.txt
# @param string identifier Rsync destination directory
# @param string port Rsync host port
#
backup_host_file() {

	# function parameters
	local hostlink="$1"
	local identifier="$2"
	local port="$3"

	# filename
	local filename=
	filename=$(basename "$hostlink")

	# eventually set the default rsync port
	if [ -z "$port" ]; then
		port=22
	fi

	# validate the arguments
	validate_backup_host_args "$hostlink" "$identifier" "$port"

	# destination path
	local dest="$BASE/$identifier/$filename"

	# create the destination directory if it does not exist
	write_safe_basedir_if_unexisting "$dest"

	# backup this single file
	$RSYNC_REMOTE --rsh="ssh -p $port" "$hostlink" "$dest"
}

#
# Send a date to a Zabbix item
#
# This will be done only if Zabbix is installed.
# If the "zabbix_sender" command is not installed, nothing will be done.
#
# Item definition:
# - Type: Zabbix trapper
# - Unit: unixtime
# - Type of information: Numeric
#
zabbix_sender_date_if_installed() {
	local zabbix_item_key="$1"

	# check if the Zabbix sender is installed
	if which zabbix_sender > /dev/null; then

		local current_time
		current_time="$(date +%s)"

		# note that the Zabbix sender can fail
		# note that the Zabbix output is not useful
		# if you have problems, remove this part: > /dev/null
		if zabbix_sender \
			--config /etc/zabbix/zabbix_agentd.conf \
			--key "$zabbix_item_key" \
			--value "$current_time" > /dev/null; then

			log "sent Zabbix value for key $zabbix_item_key"
		else
			error "failed sending Zabbix value for key $zabbix_item_key - exit status: $?"
		fi
	fi
}

log "init backup in $DAILY"

# try to record when we started
zabbix_sender_date_if_installed micro_backup_date_start

. "$INSTRUCTIONS"

# try to record when we concluded
zabbix_sender_date_if_installed micro_backup_date_stop

log "backup successfully concluded"

# remember the last successfully concluded backup timestamp
-date +%s > "$DAILY_LASTTIME"
+write_timestamp "$DAILY_LASTTIME"
diff --git a/bootstrap.sh b/bootstrap.sh
index 8bf84cc..01ed1d0 100644
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -1,223 +1,308 @@
#!/bin/bash
###
# Part of a stupid script to backup some stuff
#
# This bootstrap.sh file does nothing by itself but loads useful stuff.
#
# This file is loaded from 'backup-everything.sh' or 'rotate.sh'
#
-# Author: 2020, 2021 Valerio Bozzolan
+# Author: 2020-2024 Valerio Bozzolan, contributors
# License: MIT
##

# current directory
export DIR="${BASH_SOURCE%/*}"
if [[ ! -d "$DIR" ]]; then DIR="$PWD"; fi

# check whether the standard input is a terminal
export INTERACTIVE=
if [ -t 0 ]; then
	INTERACTIVE=1
fi

#
# Check if this is the quiet mode
#
# Default: not quiet.
#
# Actually we are in quiet mode if it's not interactive.
# This lazy behavior is to avoid stupid emails from the crontab
# without the need to specify some --quiet etc.
# Note that in quiet mode only WARN and ERROR messages are shown.
# I've not created a --quiet flag because nobody needs it.
#
# Edit your options - do not edit here.
#
#QUIET=

#
# Eventually write a log file
#
# Default: write a log file.
#
# Edit your options - do not edit here.
#
export WRITELOG=1

# path to the instructions file
export INSTRUCTIONS="$DIR/backup-instructions.conf"

# path to the configuration file
export CONFIG="$DIR/options.conf"
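
#
# A minimal sketch of what 'options.conf' may contain (the values below
# are hypothetical examples, not shipped defaults):
#
#   BASE="/mnt/backups"
#   NO_DISSERVICE=1
#   BACKUP_LAST_LOG_LINES=2000
#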
-f "$CONFIG" ]; then echo "missing options expected in $CONFIG" exit 1 fi # default mysql commands # --batch: avoid fancy columns (auto-enabled, but better to specify it) # --silent: avoid the column name to be included export MYSQL="mysql --batch --silent" export MYSQLDUMP="mysqldump --routines --triggers" # default rsync command # --archive: Try to keep all the properties # --fuzzy: Try to check if a file was renamed instead of delete and download a new one # It's efficient for example with log rotated files. # --delete: Delete the destination files if not present in the source # NOTE: we want this behaviour but it's not a good idea toghether with --fuzzy # that's why we do not use --delete but we use the next flags # --delay-updates Put all updated files into place at end (useful with fuzzy and delete modes) # --delete-delay Delete after everything (useful with fuzzy and delete modes) # NOTE: sometime some data is kept in damn .~tmp~ directories # So we are deprecating --delete-delay, and going back to --delete # and so removing --fuzzy # --hard-links Try to look for hard links during the transfer to do not copy separate files #RSYNC="rsync --archive --fuzzy --delay-updates --delete-delay --hard-links" # default rsync command # --archive: Try to keep all the properties # --delete: Delete the destination files if not present in the source # --hard-links Try to look for hard links during the transfer to do not copy separate files export RSYNC="rsync --archive --delete --hard-links" # rsync used in remote transfers # --compress Use more CPU to save network bandwidth export RSYNC_REMOTE="$RSYNC --compress" # default base backup directory for all backups export BASE="/home/backups" # default box name BOX="$(hostname)" export BOX # set to 1 to avoid any disservice (e.g. systemctl stop/start) export NO_DISSERVICE= # set to 1 to do nothing export PORCELAIN= # include the configuration to eventually override some options # shellcheck source=config.sh . "$CONFIG" # as default, if not interactive, set quite mode if [ -z "$QUIET" ] && [ "$INTERACTIVE" != 1 ]; then QUIET=1 fi # full pathnames to the backup directories export BASEBOX="$BASE/$BOX" export DAILY="$BASEBOX/daily" export DAILY_FILES="$DAILY/files" export DAILY_DATABASES="$DAILY/databases" export DAILY_LASTLOG="$DAILY/last.log" export DAILY_LASTTIME="$DAILY/last.timestamp" # apply the porcelain to the rsync command if [ "$PORCELAIN" = 1 ]; then RSYNC="$RSYNC --dry-run" RSYNC_REMOTE="$RSYNC_REMOTE --dry-run" fi # set default backup_last_log() lines if [ -z "$BACKUP_LAST_LOG_LINES" ]; then BACKUP_LAST_LOG_LINES=8000 fi +## +# Receive in input a file path, and a number of hours, and check whenever +# enough time (in days) was passed or not. +# +# If the file was never created, we assume that enough time was passed. +# +# @param string timestamp_file +# @param int days +# +function are_enough_days_passed() { + + # No args, no party. + # Note that the file argument will be checked later. + local timestamp_file="$1" + local expected_days="$2" + if [ -z "$expected_days" ]; then + echo "Error: Missing argument expected days." + exit 2 + fi + + local expected_seconds=$((expected_days * 24 * 3600)) + + are_enough_seconds_passed "$timestamp_file" "$expected_hours" +} + +## +# Receive in input a file path, and a number of hours, and check whenever +# enough time (in seconds) was passed or not. +# +# If the file was never created, we assume that enough time was passed. 

+##
+# Receive in input a file path and a number of days, and check whether
+# enough time (in days) has passed or not.
+#
+# If the file was never created, we assume that enough time has passed.
+#
+# @param string timestamp_file
+# @param int days
+#
+function are_enough_days_passed() {
+
+	# No args, no party.
+	# Note that the file argument will be checked later.
+	local timestamp_file="$1"
+	local expected_days="$2"
+	if [ -z "$expected_days" ]; then
+		echo "Error: Missing argument expected days."
+		exit 2
+	fi
+
+	local expected_seconds=$((expected_days * 24 * 3600))
+
+	are_enough_seconds_passed "$timestamp_file" "$expected_seconds"
+}
+
+##
+# Receive in input a file path and a number of seconds, and check whether
+# enough time (in seconds) has passed or not.
+#
+# If the file was never created, we assume that enough time has passed.
+#
+# @param string timestamp_file
+# @param int seconds
+#
+function are_enough_seconds_passed() {
+
+	# No args, no party.
+	local timestamp_file="$1"
+	local expected_seconds="$2"
+	if [ -z "$timestamp_file" ]; then
+		echo "Error: Missing argument timestamp file."
+		exit 2
+	fi
+	if [ -z "$expected_seconds" ]; then
+		echo "Error: Missing argument expected seconds."
+		exit 2
+	fi
+
+	if [ -f "$timestamp_file" ]; then
+
+		# Read the file, if it makes sense.
+		local last_timestamp=$(<"$timestamp_file")
+		if [ "$last_timestamp" -lt 1000 ]; then
+			echo "Error: Bad format in file $timestamp_file"
+			exit 2
+		fi
+
+		local current_timestamp=$(date +%s)
+		local diff_seconds=$((current_timestamp - last_timestamp))
+
+		# If enough time has passed, return true (0).
+		[ "$diff_seconds" -ge "$expected_seconds" ]
+		return
+	fi
+
+	# The file doesn't exist: assume that enough time has passed.
+	return 0
+}
+
+##
+# Receive in input a file path, and write there the current timestamp.
+#
+# @param string timestamp_file
+#
+function write_timestamp() {
+
+	# No arg, no party.
+	local timestamp_file="$1"
+	if [ -z "$timestamp_file" ]; then
+		echo "Error: Missing timestamp file argument."
+		exit 1
+	fi
+
+	# Write the current Unix timestamp.
+	date +%s > "$timestamp_file"
+}
+
###
# Print something
#
# It also puts the message in the backup directory log file
#
# @param string severity
# @param string message
#
function printthis() {
	local msg
	msg="[$(date)][$1] $2"

	# print to standard output if it's not in quiet mode
	if [ "$QUIET" != 1 ]; then
		printf "%s\n" "$msg"
	fi

	# put in the log file if possible
	if [ -f "$DAILY_LASTLOG" ] && [ "$WRITELOG" = 1 ]; then
		printf "%s\n" "$msg" >> "$DAILY_LASTLOG"
	fi
}

###
# Run an rsync
#
function copy() {

	# show what we are doing
	log "copy $*"

	# run the rsync command
	if [ "$PORCELAIN" != 1 ]; then
		$RSYNC "$@"
	fi
}

###
# Remove a pathname
#
function drop() {

	# show what we are doing
	log "drop $*"

	# well, proceed... fingers crossed... with some protections
	if [ "$PORCELAIN" != 1 ]; then
		rm --recursive --force --one-file-system --preserve-root -- "$@"
	fi
}

###
# Move something somewhere
#
function move() {

	# show what we are doing
	log "move $*"

	if [ "$PORCELAIN" != 1 ]; then
		mv --force "$@"
	fi
}

###
# Print an information message
#
# @param msg Message
#
function log() {
	printthis INFO "$1"
}

###
# Print a warning message
#
# @param msg Message
#
function warn() {
	printthis WARN "$1"
}

###
# Print an error message
#
# @param msg Message
#
function error() {
	printthis ERROR "$1"
}
diff --git a/rotate.sh b/rotate.sh
index cda2692..5f5701e 100755
--- a/rotate.sh
+++ b/rotate.sh
@@ -1,172 +1,157 @@
#!/bin/bash
###
# Stupid script to rotate a backup
#
# Author: Valerio B.
# Date: Wed 4 Aug 2020
# License: CC 0 - public domain
##

# do not proceed in case of errors
set -e

# current directory
MYDIR="$(dirname "$(realpath "$0")")"

# by default, don't be quiet while rotating
QUIET=0

# by default, don't write the log while rotating
WRITELOG=0

#
# Maximum time that your rotation could last
#
# Right now this should be a good default, since it doesn't make much sense
# for a rotation to take more than this number of hours.
#
# If the script takes longer than this, the next rotation may not run.
#
# Note: at the moment this must be shorter than a single day.
#
# Current default: 6 hours (6 * 60 * 60 = 21600 seconds)
MAX_ROTATE_SECONDS=21600
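
#
# A scheduling sketch: this script is typically run periodically, e.g. from
# cron (the pathname and times below are hypothetical examples):
#
#   30 5 * * * /opt/micro-backup/rotate.sh /home/backups/mybox/daily 1 30
#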
"$MYDIR"/bootstrap.sh # arguments place="$1" days="$2" max="$3" # expected file containing last timestamp last_timestamp_file="$place.timestamp" -# current timestamp -current_timestamp=$(date +%s) - # show usage function show_help_rotate() { echo "USAGE" echo " $0 PATH DAYS MAX_ROTATIONS" echo "EXAMPLE" echo " $0 /home/backups 1 30" } function harden() { local harden_path="$1" # no path no party if [ -z "$harden_path" ]; then echo "Wrong usage of harden" exit 2 fi # Harden rotations # # Note that non-privileged users should be able to push their last copy, # but MUST not in any way be able to touch older copies chown root:root "$harden_path" chmod 600 "$harden_path" } # all the arguments must exist (just check the last one) if [ -z "$max" ]; then echo "Bad usage" show_help_rotate exit 1 fi # the place to be rotated must exist if [ ! -e "$place" ]; then error "unexisting directory '$place'" exit 2 fi # validate max parameter if [ "$max" -lt 2 ]; then echo "The MAX parameter must be greater than 1" show_help_rotate exit 3 fi -# check if the last timestamp was writed -if [ -f "$last_timestamp_file" ]; then - - # check the timestamp saved in the file - timestamp=$(<"$last_timestamp_file") - if [ "$timestamp" -lt 1000 ]; then - echo "bad format in file $last_timestamp_file" - exit - fi - - # seconds spent from the last rotation - diff_seconds=$(expr "$current_timestamp" - "$timestamp") +# expected seconds from the last rotation before continuing +# NOTE: leave the star escaped to avoid syntax error in expr +expected_seconds=$(expr "$days" "*" 86400) - # expected seconds from the last rotation before continuing - # NOTE: leave the star escaped to avoid syntax error in expr - expected_seconds=$(expr "$days" "*" 86400) +# check if the duration in seconds is a day or more +if [ "$expected_seconds" -ge 86400 ]; then - # check if the duration in seconds is a day or more - if [ "$expected_seconds" -ge 86400 ]; then - - # the expected time since the last execution is never exactly the number of days in seconds - # Solution: remove few hours from the expected (just to say, uhm, 5 hours) - expected_seconds=$(expr "$expected_seconds" - "$MAX_ROTATE_SECONDS") - fi + # the expected time since the last execution is never exactly the number of days in seconds + # Solution: remove few hours from the expected (just to say, uhm, 5 hours) + expected_seconds=$(expr "$expected_seconds" - "$MAX_ROTATE_SECONDS") +fi - # check if it's not passed enought time - if [ "$diff_seconds" -lt "$expected_seconds" ]; then - warn "Doing nothing: last rotation was executed $diff_seconds seconds ago (expected at least $expected_seconds)" - exit - fi +# do not proceed if not enough time passed since last execution on that directory +# this avoids daylight saving time change problems +# this also avoids race conditions when starting parallel executions by mistake +if ! 

-# check if the last timestamp was writed
-if [ -f "$last_timestamp_file" ]; then
-
-	# check the timestamp saved in the file
-	timestamp=$(<"$last_timestamp_file")
-	if [ "$timestamp" -lt 1000 ]; then
-		echo "bad format in file $last_timestamp_file"
-		exit
-	fi
-
-	# seconds spent from the last rotation
-	diff_seconds=$(expr "$current_timestamp" - "$timestamp")
+# expected seconds from the last rotation before continuing
+# NOTE: leave the star escaped to avoid a syntax error in expr
+expected_seconds=$(expr "$days" "*" 86400)

-	# expected seconds from the last rotation before continuing
-	# NOTE: leave the star escaped to avoid syntax error in expr
-	expected_seconds=$(expr "$days" "*" 86400)
+# check if the duration in seconds is a day or more
+if [ "$expected_seconds" -ge 86400 ]; then

-	# check if the duration in seconds is a day or more
-	if [ "$expected_seconds" -ge 86400 ]; then
-
-		# the expected time since the last execution is never exactly the number of days in seconds
-		# Solution: remove few hours from the expected (just to say, uhm, 5 hours)
-		expected_seconds=$(expr "$expected_seconds" - "$MAX_ROTATE_SECONDS")
-	fi
+	# the expected time since the last execution is never exactly the number of days in seconds
+	# Solution: remove a few hours from the expected (that is, MAX_ROTATE_SECONDS)
+	expected_seconds=$(expr "$expected_seconds" - "$MAX_ROTATE_SECONDS")
+fi

-	# check if it's not passed enought time
-	if [ "$diff_seconds" -lt "$expected_seconds" ]; then
-		warn "Doing nothing: last rotation was executed $diff_seconds seconds ago (expected at least $expected_seconds)"
-		exit
-	fi
+# do not proceed if not enough time has passed since the last execution on that directory
+# this avoids daylight saving time change problems
+# this also avoids race conditions when starting parallel executions by mistake
+if ! are_enough_seconds_passed "$last_timestamp_file" "$expected_seconds"; then
+	warn "doing nothing: last rotation was executed too recently on $place: now-last $(date +%s)-$(< "$last_timestamp_file") - expected at least $expected_seconds seconds"
+	exit 0
fi

# save the last timestamp before rotating everything
# this will also avoid parallel rotations
-echo "$(date +%s)" > "$last_timestamp_file"
+write_timestamp "$last_timestamp_file"

# eventually drop the oldest backup step
# if it does not exist, don't care
max_path="$place.$max"
drop "$max_path"

# shift all the backups
after="$max"
while [[ "$after" -gt 1 ]]; do

	before=$(expr "$after" - 1)

	# do not process the root directory for no reason in the world if you type that by mistake
	# the --preserve-root is already implicit but... let's be sure! asd
	before_path="$place.$before"
	after_path="$place.$after"

	# the source must exist. asd
	if [ -e "$before_path" ]; then

		# move the whole directory one step forward
		move "$before_path/" "$after_path"

		harden "$after_path"
	fi

	# next
	after="$before"
done

# at the end, copy the base forward
# the trailing slash means: copy the files, not just the directory
copy "$place/" "$place.1"

harden "$place.1"

# yeah!
log "rotation concluded"
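
#
# For illustration, with MAX_ROTATIONS=3 a run on a hypothetical
# PATH=/home/backups/mybox/daily does the following:
#
#   daily.3 is dropped
#   daily.2 is moved to daily.3
#   daily.1 is moved to daily.2
#   daily   is copied to daily.1
#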