#!/usr/bin/env python

# backup.py - script for efficient backup
#
# Copyright (C) 2015 Arthur de Jong
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# The files produced as output from the software do not automatically fall
# under the copyright of the software, unless explicitly stated otherwise.

import datetime
import json
import math
import os
import os.path
import random
import socket
import sqlite3
import string
import tarfile
import time


class Config(object):

    # maximum size in backed up bytes per archive
    # (size may be smaller or slightly larger due to how files are spread
    # over archives)
    block_size = 10 * 1024 * 1024

    # directory that will contain the local meta-data cache
    cache_dir = None

    # do not use existing archives that have less than this percentage
    # of usable data (relative to block_size)
    archive_min_perc = 10

    # use an extractlist for archives that are only effectively used
    # below this percentage
    archive_use_extractlist = 50

    compression = 'gzip'

    encryption = 'gpg'

    # umask used for files created in the repository
    # TODO: UMASK = 0077


class MetaData(object):

    def __init__(self, cache_dir):
        # set a default cache directory
        if not cache_dir:
            import xdg.BaseDirectory
            cache_dir = os.path.join(
                xdg.BaseDirectory.xdg_cache_home, 'backupscript')
        # create cache directory if needed
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        # set up SQLite connection
        self.connection = sqlite3.connect('%s/metadata.sqlite' % cache_dir)
        # create database tables
        self.connection.executescript('''
            PRAGMA secure_delete = false;
            PRAGMA temp_store = MEMORY;
            PRAGMA cache_size = 20000;
            CREATE TABLE IF NOT EXISTS `archives`
              ( `archive` TEXT NOT NULL,
                `path` TEXT NOT NULL,
                `size` INTEGER NOT NULL,
                `meta` TEXT NOT NULL,
                `used` INTEGER NOT NULL DEFAULT 0 );
            CREATE INDEX IF NOT EXISTS `archives_archive_idx`
              ON `archives` (`archive`);
            CREATE INDEX IF NOT EXISTS `archives_path_idx`
              ON `archives` (`path`);
            CREATE INDEX IF NOT EXISTS `archives_meta_idx`
              ON `archives` (`meta`);
            ''')

    def is_crawled(self, path):
        cursor = self.connection.execute('''
            SELECT 1
            FROM `crawled`
            WHERE `path` = ?
            ''', (path, ))
        return bool(list(cursor))

    def is_archive(self, archive):
        cursor = self.connection.execute('''
            SELECT 1
            FROM `archives`
            WHERE `archive` = ?
            ''', (archive, ))
        return bool(list(cursor))

    def mark_archived(self, archive, path, size=None, meta=None):
        """Mark the specified file as being part of the specified archive."""
        if size is None or meta is None:
            cursor = self.connection.execute('''
                SELECT `size`, `meta`
                FROM `crawled`
                WHERE `path` = ?
                ''', (path, ))
            (size, meta) = cursor.fetchone()
        self.connection.execute('''
            INSERT INTO `archives`
              (`archive`, `path`, `size`, `meta`, `used`)
            VALUES
              (?, ?, ?, ?, ?)
            ''', (archive, path, size, meta, size))

    def archive_contains_deleted(self, archive):
        """Check if the archive contains files that should not be extracted
        for this backup (in this case an extractlist is mandatory)."""
        cursor = self.connection.execute('''
            SELECT 1
            FROM `archives`
            WHERE `archive` = ?
              AND `path` NOT IN (
                SELECT `path`
                FROM `crawled`)
            LIMIT 1
            ''', (archive, ))
        return bool(list(cursor))

    def commit(self):
        self.connection.commit()


def base36(i):
    """Return a BASE36 encoded string of the integer value."""
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    result = []
    while i != 0:
        i, d = divmod(i, 36)
        result.append(alphabet[d])
    result.reverse()
    return ''.join(result)


def generate_passphrase(bits, alphabet=string.letters + string.digits):
    l = int(math.ceil(math.log(math.pow(2, bits)) / math.log(len(alphabet))))
    r = random.SystemRandom()
    return ''.join(r.choice(alphabet) for i in xrange(l + 1))


def generate_archive_name():
    """Generate a name based on a timestamp and random value."""
    name = '%8s-%8s' % (
        base36(int(time.time() * 10)),
        base36(random.randrange(int('zzzzzzzz', 36) + 1)))
    return name.replace(' ', '0')


def generate_snapshot_name():
    """Create a new snapshot in the repository."""
    return datetime.datetime.now().strftime('%Y%m%d-%H%M%S')


class FileRepository(object):

    def __init__(self, path):
        self.path = path
        # FIXME: check that path exists
        self._passphrase = None

    def listdir(self, path):
        """Return the names of all the entries in the directory."""
        try:
            return (
                os.path.join(path, f)
                for f in os.listdir(os.path.join(self.path, path)))
        except OSError:
            return ()

    def exists(self, path):
        return os.path.exists(os.path.join(self.path, path))

    def remove(self, path):
        os.remove(os.path.join(self.path, path))

    def rename(self, old, new):
        if self.exists(new):
            self.remove(new)
        os.rename(os.path.join(self.path, old), os.path.join(self.path, new))

    def list_archives(self):
        """Return archives that are found in the repository."""
        import re
        filelist = set()
        for filename in self.listdir('archives'):
            m = re.match(
                r'archives/([0-9a-zA-Z]{8}-[0-9a-zA-Z]{8})\.', filename)
            if m:
                filelist.add(m.group(1).lower())
        filelist = list(filelist)
        filelist.sort()
        return filelist

    def list_snapshots(self):
        """Return snapshots that are found in the repository."""
        filelist = list()
        for filename in self.listdir('snapshots'):
            filelist.append(filename)
        filelist.sort()
        return filelist

    def write_file(self, filename, executable=False, encryption=None,
                   compression=None):
        """Return an open file handle that can be used to write a file
        contents to."""
        if compression:
            filename = compression.rename(filename)
        if encryption:
            filename = encryption.rename(filename)
        path = os.path.join(self.path, filename)
        dirname = os.path.dirname(path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        f = open(path, 'w')
        if executable:
            os.fchmod(f.fileno(), 0755)
        if encryption:
            f = encryption.writer(f)
        if compression:
            f = compression.writer(f)
        return f

    def get_filters(self, filename, encryption=None):
        from filters import (
            GzipCompression, Bzip2Compression, XZCompression, GnuPGEncryption)
        filters = []
        while True:
            if encryption and filename.endswith('.' + encryption.extension):
                filename = filename[:-(len(encryption.extension) + 1)]
                filters.append(encryption)
            elif filename.endswith('.' + GnuPGEncryption.extension):
                filename = filename[:-(len(GnuPGEncryption.extension) + 1)]
                filters.append(GnuPGEncryption(self))
            elif filename.endswith('.' + GzipCompression.extension):
                filename = filename[:-(len(GzipCompression.extension) + 1)]
                filters.append(GzipCompression())
            elif filename.endswith('.' + Bzip2Compression.extension):
                filename = filename[:-(len(Bzip2Compression.extension) + 1)]
                filters.append(Bzip2Compression())
            elif filename.endswith('.' + XZCompression.extension):
                filename = filename[:-(len(XZCompression.extension) + 1)]
                filters.append(XZCompression())
            else:
                break
        return filename, filters

    def expand(self, filename, encryption=None):
        path = os.path.dirname(filename)
        for f in self.listdir(path):
            if f.startswith(filename):
                name, filters = self.get_filters(f, encryption)
                if name == filename:
                    return f, filters
        return filename, []

    def read_file(self, filename, encryption=None):
        """Return an open file handle that can be used to read from."""
        filename, filters = self.expand(filename, encryption)
        path = os.path.join(self.path, filename)
        f = open(path, 'r')
        for fltr in filters:
            f = fltr.reader(f)
        return f

    def get_passphrase(self):
        if not self._passphrase:
            # TODO: we should be able to use multiple passphrases for
            # different parts of the repository
            f = self.read_file(
                'keys/passphrase', encryption=self.keyencryption)
            self._passphrase = f.read().strip()
            f.close()
        return self._passphrase

    def get_or_create_passphrase(self):
        try:
            return self.get_passphrase()
        except IOError:
            # generate a passphrase with about 256 bits entropy
            return generate_passphrase(256)

    def write_passphrase(self, passphrase):
        # FIXME: use expand to find old name
        filename = 'keys/passphrase'
        # remove any existing new files
        newfile, filters = self.expand(filename + '.new')
        if self.exists(newfile):
            self.remove(newfile)
        # write out the new encrypted file
        f = self.write_file(filename + '.new', encryption=self.keyencryption)
        f.write('%s\n' % passphrase)
        f.close()
        # figure out the file names
        newfile, filters = self.expand(filename + '.new')
        curfile, filters = self.expand(filename)
        oldfile, filters = self.expand(filename + '.old')
        # remove the old backup
        if self.exists(oldfile):
            self.remove(oldfile)
        # backup the old passphrase
        if self.exists(curfile):
            oldfile = (curfile + '.').replace('.', '.old.', 1).rstrip('.')
            self.rename(curfile, oldfile)
        # put new passphrase file in place
        self.rename(newfile, newfile.replace('.new', '', 1))


class TarWriter(object):

    def __init__(self, repo, db, compression, encryption):
        self.repo = repo
        self.db = db
        self.compression = compression
        self.encryption = encryption
        self.name = generate_archive_name()
        self.fileobj = repo.write_file(
            'archives/%s.tar' % self.name,
            encryption=encryption, compression=compression)
        self.tar = tarfile.open(fileobj=self.fileobj, mode='w|')
        self.size = 0
        self.directories = set()

    def _add(self, path, size=None, meta=None):
        self.tar.add(path, recursive=False)
        self.db.mark_archived(self.name, path, size, meta)

    def _add_parents(self, path):
        """Ensure that directory entries are correctly created in the
        archive."""
        if os.path.isdir(path):
            self.directories.add(path)
        parent = os.path.dirname(path)
        if parent not in self.directories:
            if self.db.is_crawled(parent):
                self._add_parents(parent)
                self._add(parent)
            self.directories.add(parent)

    def add(self, path, size, meta):
        """Add the specified path to the archive."""
        self._add_parents(path)
        self._add(path, size, meta)
        self.size += size

    def _write_metadata(self):
        cursor = self.db.connection.execute('''
            SELECT `meta`
            FROM `archives`
            WHERE `archive` = ?
            ORDER BY `path`
            ''', (self.name, ))
        f = self.repo.write_file(
            'archives/%s.json' % self.name,
            encryption=self.encryption, compression=self.compression)
        f.write('[')
        first = True
        for meta, in cursor:
            if first:
                f.write('\n' + meta)
            else:
                f.write(',\n' + meta)
            first = False
        f.write('\n]\n')
        f.close()

    def close(self):
        self.db.commit()
        self.tar.close()
        self.fileobj.close()
        self._write_metadata()


def select_archives(config, db):
    """Return a list of existing archives that can be re-used for this
    backup run."""
    # find archives that contain at least archive_min_perc% useful files
    cursor = db.connection.execute('''
        SELECT `archive`, SUM(`used`) AS `totused`,
               100 * SUM(`used`) / SUM(`size`) AS `fill`
        FROM `archives`
        GROUP BY `archive`
        HAVING `totused` > ?
        ORDER BY `archive` ASC
        ''', (int((config.block_size * config.archive_min_perc) / 100), ))
    used_archives = []
    for archive, used, fill in cursor:
        print 'Will use %3s%% of %s' % (fill, archive)
        extractlist = None
        # if the percentage used is low enough or if extracting the full
        # archive will cause problems, use an extractlist
        if fill < config.archive_use_extractlist or \
                db.archive_contains_deleted(archive):
            # generate the list of files that should be extracted from the
            # specified archive
            # TODO:
            # (currently this does not include directories at all but it
            # could be smarter)
            # - this should not include any directories if any file under
            #   that directory should be excluded
            # - (as an optimisation) this should not contain any files
            #   under directories that are listed
            cursor = db.connection.execute('''
                SELECT `archives`.`path`
                FROM `archives`
                INNER JOIN `crawled`
                  ON `archives`.`meta` = `crawled`.`meta`
                WHERE `archive` = ?
                  AND `is_dir` = 0
                ORDER BY `archives`.`path`
                ''', (archive, ))
            extractlist = list(path for path, in cursor)
        used_archives.append((archive, extractlist))
    return used_archives


def update_crawled(db, used_archives):
    """Mark crawled files as dumped based on the provided archives."""
    # go over the archives and reconstruct the to-be-restored tree so we can
    # check for any crawled files that remain to be dumped
    # (this can happen with files extracted from the extractlist before their
    # directory entries are present or when a later used archive includes an
    # incorrect file)
    db.connection.execute('''
        CREATE TEMPORARY TABLE `restored`
          ( `path` TEXT PRIMARY KEY,
            `meta` TEXT NOT NULL )
        ''')
    for archive, extractlist in used_archives:
        # TODO: if extractlist contains a directory, it should be restored
        # recursively
        if not extractlist:
            extractlist = []
        db.connection.execute('''
            INSERT OR REPLACE INTO `restored`
              (`path`, `meta`)
            SELECT `path`, `meta`
            FROM `archives`
            WHERE `archive` = ?
            ''' + (
                'AND `path` IN (%s)' % ', '.join(
                    '?' * len(extractlist))
                if extractlist else ''),
            [archive, ] + extractlist)
        if extractlist:
            # any parent directory timestamp will be updated when extracting
            dirs = set(os.path.dirname(path) for path in extractlist)
            db.connection.executemany('''
                INSERT OR REPLACE INTO `restored`
                  (`path`, `meta`)
                VALUES
                  (?, '')
                ''', ((path, ) for path in dirs))
    db.commit()
    # update crawled table
    db.connection.execute('''
        UPDATE `crawled`
        SET `dumped` = 1
        WHERE `meta` IN (
          SELECT `meta`
          FROM `restored`)
        ''')
    db.commit()
    # drop restored table
    db.connection.execute('''
        DROP TABLE `restored`
        ''')


def create_archives(config, repo, db, compression, encryption, used_archives):
    """Create any archives for crawled, undumped files."""
    # get the files that have not yet been marked as part of an archive
    cursor = db.connection.execute('''
        SELECT `path`, `size`, `meta`
        FROM `crawled`
        WHERE `dumped` = 0
        ORDER BY `id`
        ''')
    archive = None
    for path, size, meta in cursor:
        if archive and archive.size + size >= config.block_size:
            archive.close()
            used_archives.append((archive.name, None))
            archive = None
        if archive is None:
            archive = TarWriter(repo, db, compression, encryption)
            print 'Creating %s' % archive.name
        try:
            archive.add(path, size, meta)
        except IOError:
            import traceback
            print traceback.format_exc()
    if archive:
        archive.close()
        used_archives.append((archive.name, None))


def write_restore_sh(repo, fp, snapshot, used_archives):
    import textwrap
    fp.write(textwrap.dedent('''
        #!/bin/sh
        # restore.sh - script to do a full restore from backup %s
        umask 077
        dir=`dirname "$0"`
        REPO=`cd "$dir/../.." && pwd`
        EXTMPDIR="/tmp/extract.$$"
        mkdir "$EXTMPDIR" || exit 1
        ''' % snapshot).lstrip())

    def x(filename):
        filename, filters = repo.expand(filename)
        out = []
        for n, f in enumerate(filters):
            if n == 0:
                out.append('%s < "$REPO"/%s' % (f.restore_cmd, filename))
            else:
                out.append(' | %s' % f.restore_cmd)
        return ''.join(out)

    if repo.keyencryption:
        from filters import GnuPGEncryption
        # TODO: if supporting multiple keys decrypt any keys that
        # are used by archives
        filename, filters = repo.expand('keys/passphrase')
        fp.write(textwrap.dedent('''
            # decrypt passphrase
            %s < "$REPO"/%s > "$EXTMPDIR"/passphrase
            ''' % (
                repo.keyencryption.restore_cmd, filename)).lstrip())
    fp.write('# unpack archives\n')
    for archive, extractlist in used_archives:
        if extractlist:
            # write code for extracting the extractlist
            fp.write(
                '%s > "$EXTMPDIR"/%s.list\n' % (
                    x('snapshots/%s/%s.list' % (snapshot, archive)), archive))
            # write code for unpacking with extractlist
            fp.write(
                '%s | tar -xpf - -T "$EXTMPDIR"/%s.list\n' % (
                    x('archives/%s.tar' % archive), archive))
        else:
            fp.write(
                '%s | tar -xpf -\n' % x('archives/%s.tar' % archive))
    fp.write(textwrap.dedent('''
        # clean up
        rm -rf "$EXTMPDIR"
        exit 0
        '''))
    # TODO: perhaps write a detached signature for restore.sh
    # (write restore.sh to cache first, then sign, then to repository)
    # write metadata-dump for snapshot (aka generation)


def resync(db, repo):
    """Resynchronise metadata cache with the information from the
    repository."""
    # remove archives from database that are not in the repository
    archives = repo.list_archives()
    db.connection.execute('''
        DELETE FROM `archives`
        WHERE `archive` NOT IN (%s)
        ''' % ', '.join(
            '?' * len(archives)), archives)
    db.commit()
    # read archive metadata that is missing from the database
    for archive in archives:
        if not db.is_archive(archive):
            print 'Importing %s metadata' % archive
            # FIXME: find all archive files (also check tar file existence)
            try:
                f = repo.read_file('archives/%s.json' % archive)
                metadata = json.load(f)
                f.close()
                db.connection.executemany('''
                    INSERT OR REPLACE INTO `archives`
                      (`archive`, `path`, `size`, `meta`)
                    VALUES
                      (?, ?, ?, ?)
                    ''', ((
                        archive, meta['path'], meta['size'],
                        json.dumps(meta, sort_keys=True))
                        for meta in metadata))
                db.commit()
            except IOError:
                import traceback
                print traceback.format_exc()


def backup(config, repo):
    from crawler import crawl
    from filters import (
        GzipCompression, Bzip2Compression, XZCompression, GnuPGEncryption,
        GnuPGKeyEncryption, NoEncryption)
    # configure compression and encryption
    if config.compression == 'none':
        compression = NoEncryption()
    elif config.compression == 'bzip2':
        compression = Bzip2Compression()
    elif config.compression == 'xz':
        compression = XZCompression()
    else:
        compression = GzipCompression()
    if config.encryption == 'none':
        encryption = NoEncryption()
        repo.keyencryption = None
    else:
        encryption = GnuPGEncryption(repo)
        repo.keyencryption = GnuPGKeyEncryption()
    # check existence of passphrase
    if repo.keyencryption:
        try:
            repo.get_passphrase()
        except IOError:
            print 'Use set-keys first'
            return
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir)
    resync(db, repo)
    # prepare the name of the snapshot (generation)
    snapshot = generate_snapshot_name()
    # TODO: or have some way to specify it on the command-line
    try:
        from dateutil.tz import tzlocal
        start_time = datetime.datetime.now(tzlocal())
    except ImportError:
        start_time = datetime.datetime.now()
    # fill the crawled table with information from the filesystem
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `crawled`
          ( `id` INTEGER PRIMARY KEY,
            `path` TEXT NOT NULL,
            `size` INTEGER NOT NULL,
            `is_dir` BOOLEAN NOT NULL,
            `meta` TEXT NOT NULL,
            `dumped` BOOLEAN NOT NULL DEFAULT 0 );
        ''')
    with db.connection:
        if not config.files:
            raise ValueError('Nothing to backup')
        crawl(config.files, db)
        db.connection.executescript('''
            CREATE INDEX IF NOT EXISTS `crawled_path_idx`
              ON `crawled` (`path`);
            CREATE INDEX IF NOT EXISTS `crawled_meta_idx`
              ON `crawled` (`meta`);
            CREATE INDEX IF NOT EXISTS `crawled_dumped_idx`
              ON `crawled` (`dumped`);
            ''')
    # find archives with optional extractlists that will form the basis
    # on top of which we will create additional archives
    used_archives = select_archives(config, db)
    # mark crawled files as dumped based on the provided archives
    with db.connection:
        update_crawled(db, used_archives)
    # create new archives of as yet undumped files
    # (this appends to used_archives)
    with db.connection:
        create_archives(config, repo, db, compression, encryption,
                        used_archives)
    # write files list for this snapshot
    cursor = db.connection.execute('''
        SELECT `meta`
        FROM `crawled`
        ORDER BY `id`
        ''')
    f = repo.write_file(
        'snapshots/%s/files.json' % snapshot,
        encryption=encryption, compression=compression)
    f.write('[')
    first = True
    for meta, in cursor:
        if first:
            f.write('\n' + meta)
        else:
            f.write(',\n' + meta)
        first = False
    f.write('\n]\n')
    f.close()
    # write out extractlists
    for archive, extractlist in used_archives:
        if extractlist:
            f = repo.write_file(
                'snapshots/%s/%s.list' % (snapshot, archive),
                encryption=encryption, compression=compression)
            for path in extractlist:
                f.write('%s\n' % path.lstrip('/'))
            f.close()
    # create restore script and snapshot files
    restorescript = repo.write_file(
        'snapshots/%s/restore.sh' % snapshot, executable=True)
    write_restore_sh(repo, restorescript, snapshot, used_archives)
    restorescript.close()
    # get statistics from crawled
    cursor = db.connection.execute('''
        SELECT COUNT(*), SUM(`size`)
        FROM `crawled`
        ''')
    files, size = cursor.fetchone()
    # write meta-data
    f = repo.write_file(
        'snapshots/%s/info.json' % snapshot,
        encryption=encryption, compression=compression)
    try:
        from dateutil.tz import tzlocal
        end_time = datetime.datetime.now(tzlocal())
    except ImportError:
        end_time = datetime.datetime.now()
    json.dump(dict(
        snapshot=snapshot,
        start_time=start_time.isoformat(),
        end_time=end_time.isoformat(),
        files=files,
        size=size,
        hostname=socket.gethostname(),
        paths=config.files,
        archives=[archive for archive, extractlist in used_archives],
        ), f)
    f.close()


def set_keys(config, repo):
    from filters import GnuPGKeyEncryption
    repo.keyencryption = GnuPGKeyEncryption(config.keys)
    passphrase = repo.get_or_create_passphrase()
    # TODO: read existing keys from the REPO and add config.keys
    repo.write_passphrase(passphrase)


def list_backups(config, repo):
    """List snapshots in the repository and print information."""
    import dateutil.parser
    from filters import GnuPGKeyEncryption
    repo.keyencryption = GnuPGKeyEncryption()
    for snapshot in repo.list_snapshots():
        try:
            f = repo.read_file('%s/info.json' % snapshot)
            info = json.load(f)
            f.close()
            start_time = info.get('start_time', '')
            if start_time:
                start_time = dateutil.parser.parse(start_time)
                start_time = start_time.strftime(
                    '%Y-%m-%d %H:%M:%S %z').strip()
            end_time = info.get('end_time', '')
            if end_time:
                end_time = dateutil.parser.parse(end_time)
                end_time = end_time.strftime('%Y-%m-%d %H:%M:%S %z').strip()
            print '%s %s .. %s' % (
                os.path.basename(snapshot), start_time, end_time)
            hostname = info.get('hostname')
            for path in info.get('paths', ()):
                print ' %s%s%s' % (hostname, ':' if hostname else '', path)
            extra = []
            if 'files' in info:
                extra.append('%d files' % info['files'])
            if 'size' in info:
                extra.append('%d bytes' % info['size'])
            if 'archives' in info:
                extra.append('%d archives used' % len(info['archives']))
            if extra:
                print ' %s' % ', '.join(extra)
        except IOError:
            pass


if __name__ == '__main__':
    from cmdline import parser
    args = parser.parse_args()
    config = Config()
    for (k, v) in args.__dict__.items():
        if v is not None:
            setattr(config, k, v)
    repo = FileRepository(config.repository)
    if args.command == 'backup':
        backup(config, repo)
    elif args.command == 'set-keys':
        set_keys(config, repo)
    elif args.command == 'backups':
        list_backups(config, repo)