#!/usr/bin/env python

# backup.py - script for efficient backup
#
# Copyright (C) 2015 Arthur de Jong
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# The files produced as output from the software do not automatically fall
# under the copyright of the software, unless explicitly stated otherwise.

import datetime
import itertools
import json
import math
import os
import os.path
import random
import re
import socket
import sqlite3
import string
import sys
import tarfile
import time


class Config(object):

    # maximum size in backed up bytes per archive
    # (size may be smaller or slightly larger due to how files are spread
    # over archives)
    block_size = 10 * 1024 * 1024

    # minimum number of files per archive (set to 0 to disable)
    # (this overrides block_size)
    min_files = 10

    # directory that will contain the local meta-data cache
    cache_dir = None

    # do not use existing archives that have less than this percentage
    # of usable data (relative to block_size)
    archive_min_perc = 10

    # use an extractlist for archives that are only effectively used
    # below this percentage
    archive_use_extractlist = 50

    compression = 'gzip'

    encryption = 'gpg'

    # umask used for files created in the repository
    # TODO: UMASK = 0077


def escape_like(pattern):
    """Escape LIKE expression using backslash."""
    # escape the backslash itself first so the escapes added below are
    # not doubled
    pattern = pattern.replace('\\', '\\\\')
    pattern = pattern.replace('%', '\\%')
    return pattern.replace('_', '\\_')


def sqlite_regexp(expr, item):
    """REGEXP implementation for SQLite."""
    return re.search(expr, item) is not None
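# Illustrative sketch (not part of the original script): escape_like() is
# meant to be paired with a LIKE pattern that declares backslash as the
# escape character, as list_contents() below does.  For example:
#
#     pattern = escape_like('/home/user/my_file')  # -> '/home/user/my\\_file'
#     cursor = connection.execute(
#         '''SELECT `path` FROM `backup_contents`
#            WHERE `path` LIKE ? ESCAPE '\\' ''',
#         (pattern + '/%', ))
#
# Without the escaping, the `_` in the path would match any character.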
class MetaData(object):

    def __init__(self, cache_dir, uuid):
        # set a default cache directory
        if not cache_dir:
            import xdg.BaseDirectory
            cache_dir = os.path.join(
                xdg.BaseDirectory.xdg_cache_home, 'sloth')
        # create cache directory if needed
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        # set up SQLite connection
        self.connection = sqlite3.connect('%s/%s.sqlite' % (cache_dir, uuid))
        self.connection.create_function('REGEXP', 2, sqlite_regexp)
        # create database tables
        self.connection.executescript('''
            PRAGMA secure_delete = false;
            PRAGMA temp_store = MEMORY;
            PRAGMA cache_size = 20000;
            PRAGMA synchronous = OFF;
            PRAGMA journal_mode = MEMORY;
            CREATE TABLE IF NOT EXISTS `archive_contents` (
              `archive` TEXT NOT NULL,
              `path` TEXT NOT NULL,
              `size` INTEGER NOT NULL,
              `meta` TEXT NOT NULL,
              `used` INTEGER NOT NULL DEFAULT 0
            );
            CREATE INDEX IF NOT EXISTS `archive_contents_archive_idx`
              ON `archive_contents` (`archive`);
            CREATE INDEX IF NOT EXISTS `archive_contents_path_idx`
              ON `archive_contents` (`path`);
            CREATE INDEX IF NOT EXISTS `archive_contents_meta_idx`
              ON `archive_contents` (`meta`);
            CREATE TABLE IF NOT EXISTS `backups` (
              `backup` TEXT PRIMARY KEY,
              `json` TEXT NOT NULL
            );
            CREATE INDEX IF NOT EXISTS `backups_backup_idx`
              ON `backups` (`backup`);
            CREATE TABLE IF NOT EXISTS `backup_contents` (
              `backup` TEXT NOT NULL,
              `path` TEXT NOT NULL,
              `meta` TEXT NOT NULL
            );
            CREATE INDEX IF NOT EXISTS `backup_contents_backup_idx`
              ON `backup_contents` (`backup`);
            CREATE INDEX IF NOT EXISTS `backup_contents_path_idx`
              ON `backup_contents` (`path`);
            ''')

    def is_crawled(self, path):
        cursor = self.connection.execute('''
            SELECT 1
            FROM `crawled`
            WHERE `path` = ?
            ''', (path, ))
        return bool(list(cursor))

    def is_archive(self, archive):
        cursor = self.connection.execute('''
            SELECT 1
            FROM `archive_contents`
            WHERE `archive` = ?
            ''', (archive, ))
        return bool(list(cursor))


def base36(i):
    """Return a BASE36 encoded string of the integer value."""
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    result = []
    while i != 0:
        i, d = divmod(i, 36)
        result.append(alphabet[d])
    result.reverse()
    return ''.join(result)


def generate_passphrase(bits, alphabet=string.ascii_letters + string.digits):
    """Return a random passphrase with at least the requested entropy."""
    # number of characters needed to encode the requested number of bits
    length = int(math.ceil(
        math.log(math.pow(2, bits)) / math.log(len(alphabet))))
    r = random.SystemRandom()
    return ''.join(r.choice(alphabet) for i in range(length + 1))


def generate_archive_name():
    """Generate a name based on a timestamp and random value."""
    name = '%8s-%8s' % (
        base36(int(time.time() * 10)),
        base36(random.randrange(int('zzzzzzzz', 36) + 1)))
    return name.replace(' ', '0')


def generate_backup_name():
    """Return a name for a new backup based on the current time."""
    return datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
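# Illustrative sketch (not part of the original script): the helpers above
# produce the identifiers used for archives and backups, for example:
#
#     >>> base36(35)
#     'z'
#     >>> base36(36)
#     '10'
#     >>> generate_archive_name()    # two 8-character base36 parts,
#     '06n3z9kq-1hbd8z3q'            # zero-padded (random example value)
#     >>> generate_backup_name()
#     '20150131-174502'              # depends on the current time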
class FileRepository(object):

    def __init__(self, path):
        self.path = path
        # FIXME: check that path exists
        self._passphrase = None
        self._uuid = None

    def listdir(self, path):
        """Return the names of all the entries in the directory."""
        try:
            return (
                os.path.join(path, name)
                for name in os.listdir(os.path.join(self.path, path)))
        except OSError:
            return ()

    def exists(self, path):
        return os.path.exists(os.path.join(self.path, path))

    def remove(self, path, recurse=False):
        # TODO: this only recurses one directory deep and will fail if
        # the directory contains other directories
        if recurse:
            for subpath in self.listdir(path):
                os.remove(os.path.join(self.path, subpath))
            os.rmdir(os.path.join(self.path, path))
        else:
            os.remove(os.path.join(self.path, path))

    def rename(self, old, new):
        if self.exists(new):
            self.remove(new)
        os.rename(os.path.join(self.path, old), os.path.join(self.path, new))

    def list_archives(self):
        """Return archives that are found in the repository."""
        filelist = set()
        for filename in self.listdir('archives'):
            m = re.match(
                r'archives/([0-9a-zA-Z]{8}-[0-9a-zA-Z]{8})\.', filename)
            if m:
                filelist.add(m.group(1).lower())
        return sorted(filelist)

    def list_backups(self):
        """Return backups that are found in the repository."""
        filelist = [
            os.path.basename(filename)
            for filename in self.listdir('backups')]
        filelist.sort()
        return filelist

    def write_file(self, filename, executable=False, encryption=None,
                   compression=None):
        """Return an open file handle that can be used to write the file
        contents to."""
        if compression:
            filename = compression.rename(filename)
        if encryption:
            filename = encryption.rename(filename)
        path = os.path.join(self.path, filename)
        dirname = os.path.dirname(path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        f = open(path, 'wb')
        if executable:
            os.fchmod(f.fileno(), 0o0755)
        if encryption:
            f = encryption.writer(f)
        if compression:
            f = compression.writer(f)
        return f

    def get_filters(self, filename, encryption=None):
        from filters import (
            GzipCompression, Bzip2Compression, XZCompression, GnuPGEncryption)
        filters = []
        while True:
            if encryption and filename.endswith('.' + encryption.extension):
                filename = filename[:-(len(encryption.extension) + 1)]
                filters.append(encryption)
            elif filename.endswith('.' + GnuPGEncryption.extension):
                filename = filename[:-(len(GnuPGEncryption.extension) + 1)]
                filters.append(GnuPGEncryption(self))
            elif filename.endswith('.' + GzipCompression.extension):
                filename = filename[:-(len(GzipCompression.extension) + 1)]
                filters.append(GzipCompression())
            elif filename.endswith('.' + Bzip2Compression.extension):
                filename = filename[:-(len(Bzip2Compression.extension) + 1)]
                filters.append(Bzip2Compression())
            elif filename.endswith('.' + XZCompression.extension):
                filename = filename[:-(len(XZCompression.extension) + 1)]
                filters.append(XZCompression())
            else:
                break
        return filename, filters

    def expand(self, filename, encryption=None):
        path = os.path.dirname(filename)
        for f in self.listdir(path):
            if f.startswith(filename):
                name, filters = self.get_filters(f, encryption)
                if name == filename:
                    return f, filters
        return filename, []

    def read_file(self, filename, encryption=None):
        """Return an open file handle that can be used to read from."""
        filename, filters = self.expand(filename, encryption)
        path = os.path.join(self.path, filename)
        f = open(path, 'rb')
        for fltr in filters:
            f = fltr.reader(f)
        return f

    def get_passphrase(self):
        if not self._passphrase:
            # TODO: we should be able to use multiple passphrases for
            # different parts of the repository
            from filters import GnuPGKeyEncryption
            f = self.read_file(
                'keys/passphrase', encryption=GnuPGKeyEncryption())
            self._passphrase = str(f.read()).strip()
            f.close()
        return self._passphrase

    def get_or_create_passphrase(self):
        try:
            return self.get_passphrase()
        except IOError:
            # generate a passphrase with about 256 bits of entropy
            return generate_passphrase(256)

    def write_passphrase(self, passphrase, keys):
        from filters import GnuPGKeyEncryption
        filename = 'keys/passphrase'
        # remove any existing new files
        newfile, filters = self.expand(filename + '.new')
        if self.exists(newfile):
            self.remove(newfile)
        # write out the new encrypted file
        f = self.write_file(
            filename + '.new', encryption=GnuPGKeyEncryption(keys))
        f.write(('%s\n' % passphrase).encode('utf-8'))
        f.close()
        # figure out the file names
        newfile, filters = self.expand(filename + '.new')
        curfile, filters = self.expand(filename)
        oldfile, filters = self.expand(filename + '.old')
        # remove the old backup
        if self.exists(oldfile):
            self.remove(oldfile)
        # back up the old passphrase
        if self.exists(curfile):
            oldfile = (curfile + '.').replace('.', '.old.', 1).rstrip('.')
            self.rename(curfile, oldfile)
        # put the new passphrase file in place
        self.rename(newfile, newfile.replace('.new', '', 1))

    @property
    def uuid(self):
        if self._uuid:
            return self._uuid
        # try to read the uuid file
        try:
            f = self.read_file('uuid')
            uuid = str(f.read()).strip()
            f.close()
            if re.match(r'^[0-9a-z]{8,16}$', uuid):
                self._uuid = uuid
                return uuid
        except IOError:
            pass  # could not read uuid, generate one instead
        # generate a new uuid
        uuid = '%16s' % base36(random.randrange(int(16 * 'z', 36) + 1))
        uuid = uuid.replace(' ', '0')
        self._uuid = uuid
        # save the uuid in the repository
        f = self.write_file('uuid')
        f.write(('%s\n' % uuid).encode('utf-8'))
        f.close()
        return uuid
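# Illustrative sketch (not part of the original script): the on-disk layout
# that FileRepository manages looks roughly like this (the exact extensions
# depend on the configured compression/encryption filters):
#
#     <repository>/
#         uuid
#         keys/passphrase.gpg
#         archives/0abc1def-2ghi3jkl.tar.gz.gpg
#         archives/0abc1def-2ghi3jkl.json.gz.gpg
#         backups/20150131-174502/info.json.gz.gpg
#         backups/20150131-174502/files.json.gz.gpg
#         backups/20150131-174502/restore.sh
#         backups/20150131-174502/0abc1def-2ghi3jkl.list.gz.gpg
#
# expand()/get_filters() strip the filter extensions right-to-left to work
# out which readers are needed to open a file.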
class TarWriter(object):

    def __init__(self, repo, db, compression, encryption):
        self.repo = repo
        self.db = db
        self.compression = compression
        self.encryption = encryption
        self.name = generate_archive_name()
        self.fileobj = repo.write_file(
            'archives/%s.tar' % self.name,
            encryption=encryption, compression=compression)
        self.tar = tarfile.open(fileobj=self.fileobj, mode='w|')
        self.size = 0
        self.nfiles = 0
        self.directories = set()

    def _add(self, path, size=None, meta=None):
        self.tar.add(path, recursive=False)
        if size is None or meta is None:
            cursor = self.db.connection.execute('''
                SELECT `size`, `meta`
                FROM `crawled`
                WHERE `path` = ?
                ''', (path, ))
            (size, meta) = cursor.fetchone()
        self.db.connection.execute('''
            INSERT INTO `archive_contents`
              (`archive`, `path`, `size`, `meta`)
            VALUES
              (?, ?, ?, ?)
            ''', (self.name, path, size, meta))

    def _add_parents(self, path):
        """Ensure that directory entries are correctly created in the
        archive."""
        if os.path.isdir(path):
            self.directories.add(path)
        parent = os.path.dirname(path)
        if parent not in self.directories:
            if self.db.is_crawled(parent):
                self._add_parents(parent)
                self._add(parent)
            self.directories.add(parent)

    def add(self, path, size, meta):
        """Add the specified path to the archive."""
        self._add_parents(path)
        self._add(path, size, meta)
        self.size += size
        if size > 1:
            self.nfiles += 1

    def _write_metadata(self):
        cursor = self.db.connection.execute('''
            SELECT `meta`
            FROM `archive_contents`
            WHERE `archive` = ?
            ORDER BY `path`
            ''', (self.name, ))
        f = self.repo.write_file(
            'archives/%s.json' % self.name,
            encryption=self.encryption, compression=self.compression)
        f.write('['.encode('utf-8'))
        first = True
        for meta, in cursor:
            if first:
                f.write(('\n' + meta).encode('utf-8'))
            else:
                f.write((',\n' + meta).encode('utf-8'))
            first = False
        f.write('\n]\n'.encode('utf-8'))
        f.close()

    def close(self):
        self.tar.close()
        self.fileobj.close()
        self._write_metadata()
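# Illustrative sketch (not part of the original script): for every archive,
# _write_metadata() stores a JSON array describing its members, one object
# per line (field names other than path/size come from the crawler module):
#
#     [
#     {"path": "/home/user", "size": 0, ...},
#     {"path": "/home/user/notes.txt", "size": 1234, ...}
#     ]
#
# resync_archives() below reads this file with json.load() to rebuild the
# local `archive_contents` cache when it is missing.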
def select_archives(config, db):
    """Return a list of existing archives that can be re-used for this
    backup run."""
    # clear archive usage
    with db.connection:
        db.connection.execute('''
            UPDATE `archive_contents`
            SET `used` = 0
            ''')
    # set used based on information in the crawled table
    with db.connection:
        db.connection.execute('''
            UPDATE `archive_contents`
            SET `used` = `size`
            WHERE `meta` IN (SELECT `meta` FROM `crawled`)
            ''')
    # find archives that contain at least archive_min_perc% useful files
    cursor = db.connection.execute('''
        SELECT `archive`, SUM(`used`) AS `totused`,
               100 * SUM(`used`) / SUM(`size`) AS `fill`
        FROM `archive_contents`
        GROUP BY `archive`
        HAVING `totused` > ?
        ORDER BY `archive` ASC
        ''', (int((config.block_size * config.archive_min_perc) / 100), ))
    used_archives = []
    for archive, used, fill in cursor:
        print('Will use %3s%% of %s' % (fill, archive))
        use_extractlist = False
        extractlist = None
        # if the percentage used is low enough or if extracting the full
        # archive would cause problems, use an extractlist
        if fill < config.archive_use_extractlist:
            use_extractlist = True
        else:
            # check if the archive contains files that should not be
            # extracted for this backup (in this case an extractlist is
            # mandatory)
            cursor2 = db.connection.execute('''
                SELECT 1
                FROM `archive_contents`
                WHERE `archive` = ?
                  AND `path` NOT IN (
                    SELECT `path` FROM `crawled`)
                LIMIT 1
                ''', (archive, ))
            if bool(list(cursor2)):
                use_extractlist = True
        if use_extractlist:
            # generate the list of files that should be extracted from the
            # specified archive
            # TODO: (currently this does not include directories at all but
            # it could be smarter)
            # - this should not include any directories if any file under
            #   that directory should be excluded
            # - (as an optimisation) this should not contain any files
            #   under directories that are listed
            cursor2 = db.connection.execute('''
                SELECT `archive_contents`.`path`
                FROM `archive_contents`
                INNER JOIN `crawled`
                  ON `archive_contents`.`meta` = `crawled`.`meta`
                WHERE `archive` = ?
                  AND `is_dir` = 0
                ORDER BY `archive_contents`.`path`
                ''', (archive, ))
            extractlist = list(path for path, in cursor2)
        used_archives.append((archive, extractlist))
    return used_archives
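# Illustrative sketch (not part of the original script): select_archives()
# (and later create_archives()) build up a list of (archive, extractlist)
# pairs, where extractlist is either None (extract the whole archive on
# restore) or a list of paths to pass to `tar -T`, for example:
#
#     used_archives = [
#         ('0abc1def-2ghi3jkl', None),
#         ('0abc1def-9mno8pqr', ['/home/user/notes.txt',
#                                '/home/user/todo.txt']),
#     ]
#
# update_dumped() and write_restore_sh() both consume this structure.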
def update_dumped(db, used_archives):
    """Mark crawled files as dumped based on the provided archives."""
    # go over the archives and reconstruct the to-be-restored tree so we can
    # check for any crawled files that remain to be dumped
    # (this can happen with files extracted from the extractlist before their
    # directory entries are present or when a later used archive includes an
    # incorrect file)
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `tmp_restored` (
          `path` TEXT PRIMARY KEY,
          `meta` TEXT NOT NULL
        );
        ''')
    for archive, extractlist in used_archives:
        with db.connection:
            if not extractlist:
                extractlist = []
                db.connection.execute('''
                    INSERT OR REPLACE INTO `tmp_restored`
                      (`path`, `meta`)
                    SELECT `path`, `meta`
                    FROM `archive_contents`
                    WHERE `archive` = ?
                    ''', (archive, ))
            else:
                # TODO: if extractlist contains a directory, it should be
                # restored recursively
                for i in range(0, len(extractlist), 500):
                    chunk = extractlist[i:i + 500]
                    db.connection.execute('''
                        INSERT OR REPLACE INTO `tmp_restored`
                          (`path`, `meta`)
                        SELECT `path`, `meta`
                        FROM `archive_contents`
                        WHERE `archive` = ?
                          AND `path` IN (%s)
                        ''' % ', '.join('?' * len(chunk)),
                        [archive, ] + chunk)
            # any parent directory timestamp will be updated when extracting
            dirs = set(os.path.dirname(path) for path in extractlist)
            db.connection.executemany('''
                INSERT OR REPLACE INTO `tmp_restored`
                  (`path`, `meta`)
                VALUES (?, '')
                ''', ((path, ) for path in dirs))
    # update crawled table
    with db.connection:
        db.connection.execute('''
            UPDATE `crawled`
            SET `dumped` = 1
            WHERE `meta` IN (
              SELECT `meta` FROM `tmp_restored`)
            ''')
    # drop restored table
    db.connection.executescript('''
        DROP TABLE `tmp_restored`;
        ''')


def create_archives(config, repo, db, compression, encryption, used_archives):
    """Create any archives for crawled, undumped files."""
    # get the files that have not yet been marked as part of an archive
    cursor = db.connection.execute('''
        SELECT `path`, `size`, `meta`
        FROM `crawled`
        WHERE `dumped` = 0
        ORDER BY `id`
        ''')
    archive = None
    # use fetchall because SQLite cannot handle partial reads from a cursor
    # if the database is being modified in another cursor
    for path, size, meta in cursor.fetchall():
        if (archive and archive.nfiles >= config.min_files and
                archive.size + size >= config.block_size):
            archive.close()
            used_archives.append((archive.name, None))
            archive = None
        if archive is None:
            archive = TarWriter(repo, db, compression, encryption)
            print('Creating %s' % archive.name)
        try:
            archive.add(path, size, meta)
        except (IOError, OSError):
            import traceback
            print(traceback.format_exc())
    if archive:
        archive.close()
        used_archives.append((archive.name, None))


def write_restore_sh(fp, repo, backup, used_archives):
    import textwrap
    fp.write(textwrap.dedent('''
        #!/bin/sh

        # restore.sh - script to do a full restore from backup %s

        umask 077
        dir=`dirname "$0"`
        REPO=`cd "$dir/../.." && pwd`
        EXTMPDIR="/tmp/extract.$$"
        mkdir "$EXTMPDIR" || exit 1
        ''' % backup).lstrip().encode('utf-8'))

    def x(filename):
        filename, filters = repo.expand(filename)
        out = []
        for n, f in enumerate(filters):
            if n == 0:
                out.append('%s < "$REPO"/%s' % (f.restore_cmd, filename))
            else:
                out.append(' | %s' % f.restore_cmd)
        return ''.join(out)

    # decrypt the passphrase file if it is there
    if repo.exists(repo.expand('keys/passphrase')[0]):
        from filters import GnuPGKeyEncryption
        # TODO: if supporting multiple keys decrypt any keys that
        # are used by archives
        filename, filters = repo.expand('keys/passphrase')
        fp.write(textwrap.dedent('''
            # decrypt passphrase
            %s < "$REPO"/%s > "$EXTMPDIR"/passphrase
            ''' % (
            GnuPGKeyEncryption.restore_cmd,
            filename)).lstrip().encode('utf-8'))
    fp.write('# unpack archives\n'.encode('utf-8'))
    for archive, extractlist in used_archives:
        if extractlist:
            # write code for extracting the extractlist
            fp.write(
                ('%s > "$EXTMPDIR"/%s.list\n' % (
                    x('backups/%s/%s.list' % (backup, archive)),
                    archive)).encode('utf-8'))
            # write code for unpacking with extractlist
            fp.write(
                ('%s | tar -xpf - -T "$EXTMPDIR"/%s.list\n' % (
                    x('archives/%s.tar' % archive),
                    archive)).encode('utf-8'))
        else:
            fp.write(
                ('%s | tar -xpf -\n' % x(
                    'archives/%s.tar' % archive)).encode('utf-8'))
    fp.write(textwrap.dedent('''
        # clean up
        rm -rf "$EXTMPDIR"

        exit 0
        ''').encode('utf-8'))


# TODO: perhaps write a detached signature for restore.sh
# (write restore.sh to cache first, then sign, then to repository)
# write metadata-dump for backup (aka snapshot, aka generation)
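# Illustrative sketch (not part of the original script): for a repository
# using gzip and GnuPG, the generated restore.sh ends up looking roughly
# like the following, where <decrypt>/<decompress> stand for each filter's
# restore_cmd:
#
#     #!/bin/sh
#     # restore.sh - script to do a full restore from backup 20150131-174502
#     umask 077
#     dir=`dirname "$0"`
#     REPO=`cd "$dir/../.." && pwd`
#     EXTMPDIR="/tmp/extract.$$"
#     mkdir "$EXTMPDIR" || exit 1
#     # decrypt passphrase
#     <decrypt> < "$REPO"/keys/passphrase.gpg > "$EXTMPDIR"/passphrase
#     # unpack archives
#     <decrypt> < "$REPO"/archives/0abc1def-2ghi3jkl.tar.gz.gpg | <decompress> | tar -xpf -
#     # clean up
#     rm -rf "$EXTMPDIR"
#     exit 0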
def resync_archives(db, repo):
    """Update metadata cache with the information from the repository."""
    # get list of archives in the repository
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `tmp_archives` (
          `archive` TEXT NOT NULL
        );
        CREATE INDEX IF NOT EXISTS `tmp_archives_archive_idx`
          ON `tmp_archives` (`archive`);
        ''')
    with db.connection:
        db.connection.executemany('''
            INSERT INTO `tmp_archives`
              (`archive`)
            VALUES
              (?)
            ''', ((archive, ) for archive in repo.list_archives()))
    # remove archives from database that are not in the repository
    with db.connection:
        db.connection.execute('''
            DELETE FROM `archive_contents`
            WHERE `archive` NOT IN (
              SELECT `archive` FROM `tmp_archives`)
            ''')
    # read archive metadata that is missing from the database
    cursor = db.connection.execute('''
        SELECT `archive`
        FROM `tmp_archives`
        WHERE `archive` NOT IN (
          SELECT DISTINCT `archive` FROM `archive_contents`)
        ''')
    # use fetchall because SQLite cannot handle partial reads from a cursor
    # if the database is being modified in another cursor
    for archive, in cursor.fetchall():
        print('Importing archive %s file list' % archive)
        try:
            f = repo.read_file('archives/%s.json' % archive)
            metadata = json.load(f)
            f.close()
            with db.connection:
                db.connection.executemany('''
                    INSERT INTO `archive_contents`
                      (`archive`, `path`, `size`, `meta`)
                    VALUES
                      (?, ?, ?, ?)
                    ''', ((
                        archive, meta['path'], meta['size'],
                        json.dumps(meta, sort_keys=True))
                        for meta in metadata))
        except IOError:
            import traceback
            print(traceback.format_exc())
    # clean up
    db.connection.executescript('''
        DROP TABLE `tmp_archives`;
        ''')


def _check_backups(db, repo, table, backup=None):
    """Return the list of backups that need to be synced, checking the
    specified table."""
    if backup is not None:
        # if we already have data for the backup, we're done
        cursor = db.connection.execute('''
            SELECT 1
            FROM `%s`
            WHERE `backup` = ?
            ''' % table, (backup, ))
        if bool(list(cursor)):
            return []
        return [backup]
    # get list of backups in the repository
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `tmp_backups` (
          `backup` TEXT NOT NULL
        );
        CREATE INDEX IF NOT EXISTS `tmp_backups_backup_idx`
          ON `tmp_backups` (`backup`);
        ''')
    with db.connection:
        db.connection.executemany('''
            INSERT INTO `tmp_backups`
              (`backup`)
            VALUES
              (?)
            ''', ((backup, ) for backup in repo.list_backups()))
    # remove backups from database that are not in the repository
    with db.connection:
        db.connection.execute('''
            DELETE FROM `%s`
            WHERE `backup` NOT IN (
              SELECT `backup` FROM `tmp_backups`)
            ''' % table)
    # read backup metadata that is missing from the database
    backups = [
        row[0]
        for row in db.connection.execute('''
            SELECT `backup`
            FROM `tmp_backups`
            WHERE `backup` NOT IN (
              SELECT `backup` FROM `%s`)
            ''' % table)]
    # clean up
    db.connection.executescript('''
        DROP TABLE IF EXISTS `tmp_backups`;
        ''')
    return backups
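# Note (added comment, not in the original script): resync_archives() above
# and _check_backups() follow the same cache-refresh pattern: mirror the
# repository listing into a temporary table, delete cached rows that no
# longer exist in the repository, and only import metadata for entries the
# local SQLite cache has never seen, so a resync is cheap when nothing
# changed.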
def resync_backups(db, repo):
    """Update metadata cache with the information from the repository."""
    for backup in _check_backups(db, repo, 'backups'):
        print('Importing backup %s metadata' % backup)
        try:
            f = repo.read_file('backups/%s/info.json' % backup)
            info = json.load(f)
            f.close()
            with db.connection:
                db.connection.execute('''
                    INSERT OR REPLACE INTO `backups`
                      (`backup`, `json`)
                    VALUES
                      (?, ?)
                    ''', (backup, json.dumps(info, sort_keys=True)))
        except IOError:
            import traceback
            print(traceback.format_exc())


def _read_files_json(repo, backup):
    # we take advantage of the fact that the JSON file contains a single path
    # per line (otherwise we would have to load the whole file in memory)
    f = repo.read_file('backups/%s/files.json' % backup)
    for line in f:
        line = line.strip('[], \n')
        if line:
            yield json.loads(line)
    f.close()


def resync_backup_contents(db, repo, backup=None):
    """Update metadata cache with the information from the repository."""
    for backup in _check_backups(db, repo, 'backup_contents', backup=backup):
        print('Importing backup %s file list' % backup)
        try:
            with db.connection:
                db.connection.executemany('''
                    INSERT INTO `backup_contents`
                      (`backup`, `path`, `meta`)
                    VALUES
                      (?, ?, ?)
                    ''', ((
                        backup, meta['path'],
                        json.dumps(meta, sort_keys=True))
                        for meta in _read_files_json(repo, backup)))
        except IOError:
            import traceback
            print(traceback.format_exc())
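# Illustrative sketch (not part of the original script): files.json is
# written by backup() below with one JSON object per line, for example:
#
#     [
#     {"path": "/home/user", "size": 0, ...},
#     {"path": "/home/user/notes.txt", "size": 1234, ...}
#     ]
#
# which is what lets _read_files_json() above parse it line by line instead
# of loading the whole document at once with json.load().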
def backup(config, repo):
    from crawler import crawl
    from filters import (
        GzipCompression, Bzip2Compression, XZCompression, GnuPGEncryption,
        NoEncryption)
    # configure compression and encryption
    if config.compression == 'none':
        compression = NoEncryption()
    elif config.compression == 'bzip2':
        compression = Bzip2Compression()
    elif config.compression == 'xz':
        compression = XZCompression()
    else:
        compression = GzipCompression()
    if config.encryption == 'none':
        encryption = NoEncryption()
    else:
        encryption = GnuPGEncryption(repo)
    # check existence of passphrase
    if repo.exists(repo.expand('keys/passphrase')[0]):
        try:
            repo.get_passphrase()
        except IOError:
            print('Use set-keys first')
            return
    if not config.files:
        raise ValueError('Nothing to backup')
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    resync_archives(db, repo)
    # prepare the name of the backup
    backup = generate_backup_name()
    # TODO: or have some way to specify it on the command-line
    try:
        from dateutil.tz import tzlocal
        start_time = datetime.datetime.now(tzlocal())
    except ImportError:
        start_time = datetime.datetime.now()
    # build a list of excludes
    excludes = getattr(config, 'excludes', [])
    for excludefile in getattr(config, 'exclude_files', ()):
        with open(excludefile, 'r') as f:
            for line in f:
                line = line.rstrip('\n\r')
                if not line or line.startswith(';') or line.startswith('#'):
                    continue
                excludes.append(line)
    # fill the crawled table with information from the filesystem
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `crawled` (
          `id` INTEGER PRIMARY KEY,
          `path` TEXT NOT NULL,
          `size` INTEGER NOT NULL,
          `is_dir` BOOLEAN NOT NULL,
          `meta` TEXT NOT NULL,
          `dumped` BOOLEAN NOT NULL DEFAULT 0
        );
        ''')
    # add crawled files to the database
    with db.connection:
        db.connection.executemany('''
            INSERT INTO `crawled`
              (`path`, `size`, `is_dir`, `meta`)
            VALUES
              (?, ?, ?, ?)
            ''', (
            (path, size, is_dir, meta)
            for path, size, is_dir, meta in crawl(config.files, excludes)))
    # create indexes after crawling
    db.connection.executescript('''
        CREATE INDEX IF NOT EXISTS `crawled_path_idx` ON `crawled` (`path`);
        CREATE INDEX IF NOT EXISTS `crawled_meta_idx` ON `crawled` (`meta`);
        CREATE INDEX IF NOT EXISTS `crawled_dumped_idx` ON `crawled` (`dumped`);
        ''')
    # find archives with optional extractlists that will form the basis
    # on top of which we will create additional archives
    with db.connection:
        used_archives = select_archives(config, db)
    # mark crawled files as dumped based on the provided archives
    update_dumped(db, used_archives)
    # create new archives of as of yet undumped files
    # (this appends to used_archives)
    with db.connection:
        create_archives(config, repo, db, compression, encryption,
                        used_archives)
    # write files list for this backup
    cursor = db.connection.execute('''
        SELECT `meta`
        FROM `crawled`
        ORDER BY `id`
        ''')
    f = repo.write_file(
        'backups/%s/files.json' % backup,
        encryption=encryption, compression=compression)
    f.write('['.encode('utf-8'))
    first = True
    for meta, in cursor:
        if first:
            f.write(('\n' + meta).encode('utf-8'))
        else:
            f.write((',\n' + meta).encode('utf-8'))
        first = False
    f.write('\n]\n'.encode('utf-8'))
    f.close()
    # write out extractlists
    for archive, extractlist in used_archives:
        if extractlist:
            f = repo.write_file(
                'backups/%s/%s.list' % (backup, archive),
                encryption=encryption, compression=compression)
            for path in extractlist:
                f.write(('%s\n' % path.lstrip('/')).encode('utf-8'))
            f.close()
    # create restore script and backup files
    restorescript = repo.write_file(
        'backups/%s/restore.sh' % backup, executable=True)
    write_restore_sh(restorescript, repo, backup, used_archives)
    restorescript.close()
    # get statistics from the crawled files
    files, size = db.connection.execute('''
        SELECT COUNT(*), SUM(`size`)
        FROM `crawled`
        ''').fetchone()
    # write meta-data
    try:
        from dateutil.tz import tzlocal
        end_time = datetime.datetime.now(tzlocal())
    except ImportError:
        end_time = datetime.datetime.now()
    info = dict(
        backup=backup,
        start_time=start_time.isoformat(),
        end_time=end_time.isoformat(),
        files=files,
        size=size,
        hostname=socket.gethostname(),
        paths=config.files,
        archives=[archive for archive, extractlist in used_archives],
    )
    f = repo.write_file(
        'backups/%s/info.json' % backup,
        encryption=encryption, compression=compression)
    f.write(json.dumps(info).encode('utf-8'))
    f.close()
    # store backup information in local cache
    with db.connection:
        db.connection.execute('''
            INSERT OR REPLACE INTO `backups`
              (`backup`, `json`)
            VALUES
              (?, ?)
            ''', (backup, json.dumps(info, sort_keys=True)))


def set_keys(config, repo):
    passphrase = repo.get_or_create_passphrase()
    # TODO: read existing keys from the REPO and add config.keys
    repo.write_passphrase(passphrase, config.keys)
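# Illustrative sketch (not part of the original script): the info.json
# written for each backup contains roughly the following (values are made
# up for the example):
#
#     {"backup": "20150131-174502",
#      "start_time": "2015-01-31T17:45:02+01:00",
#      "end_time": "2015-01-31T17:46:13+01:00",
#      "files": 1234, "size": 56789012,
#      "hostname": "myhost",
#      "paths": ["/home/user"],
#      "archives": ["0abc1def-2ghi3jkl", "0abc1def-9mno8pqr"]}
#
# list_backups() below pretty-prints exactly these fields.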
def list_backups(config, repo):
    """List backups in the repository and print information."""
    import dateutil.parser
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    resync_backups(db, repo)
    # list backups
    cursor = db.connection.execute('''
        SELECT `backup`, `json`
        FROM `backups`
        ORDER BY `backup`
        ''')
    for backup, info in cursor:
        info = json.loads(info)
        start_time = info.get('start_time', '')
        if start_time:
            start_time = dateutil.parser.parse(start_time)
            start_time = start_time.strftime('%Y-%m-%d %H:%M:%S %z').strip()
        end_time = info.get('end_time', '')
        if end_time:
            end_time = dateutil.parser.parse(end_time)
            end_time = end_time.strftime('%Y-%m-%d %H:%M:%S %z').strip()
        print('%s %s .. %s' % (
            os.path.basename(backup), start_time, end_time))
        hostname = info.get('hostname')
        for path in info.get('paths', ()):
            print(' %s%s%s' % (hostname, ':' if hostname else '', path))
        extra = []
        if 'files' in info:
            extra.append('%d files' % info['files'])
        if 'size' in info:
            extra.append('%d bytes' % info['size'])
        if 'archives' in info:
            extra.append('%d archives used' % len(info['archives']))
        if extra:
            print(' %s' % ', '.join(extra))


def list_contents(config, repo):
    """List backup contents and print a file listing."""
    from ls import ls_format
    backup = config.backup
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    resync_backup_contents(db, repo, backup=backup)
    # filter file list
    pred = []
    args = [backup]
    for pattern in config.files:
        pattern = pattern.rstrip('/')
        pred.append('''`path` = ?''')
        args.append(pattern)
        pred.append('''`path` LIKE ? ESCAPE '\\' ''')
        args.append(escape_like(pattern) + '/%')
    # wrap the OR-joined predicates in parentheses so they do not override
    # the backup filter
    where = ('AND (%s)' % ' OR '.join(pred)) if pred else ''
    cursor = db.connection.execute('''
        SELECT `path`, `meta`
        FROM `backup_contents`
        WHERE `backup` = ? %s
        ORDER BY `path`
        ''' % where, args)
    # go over results
    print('%s:' % backup)
    for path, meta in cursor:
        ls_format(json.loads(meta))


def find(config, repo):
    """Find backups containing the specified files."""
    from ls import ls_format
    from path import pattern2re
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    resync_backup_contents(db, repo)
    # find backups containing the files
    pred = []
    args = []
    for pattern in config.files:
        if pattern.endswith('/'):
            pred.append('''(`path` REGEXP ? AND `meta` LIKE '%"is_dir"%')''')
        else:
            pred.append('`path` REGEXP ?')
        args.append(pattern2re(pattern.rstrip('/')))
    cursor = db.connection.execute('''
        SELECT `backup`, `meta`
        FROM `backup_contents`
        WHERE %s
        ORDER BY `backup`, `path`
        ''' % ' OR '.join(pred), args)
    # print results
    for backup, rows in itertools.groupby(cursor, lambda row: row[0]):
        print('%s:' % backup)
        for row in rows:
            ls_format(json.loads(row[1]))
def remove_backups(config, repo):
    """Remove backups from the repository, also removing archives that are
    no longer used."""
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    resync_backups(db, repo)
    # build a mapping between backups and archives
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `tmp_backup_archives` (
          `backup` TEXT NOT NULL,
          `archive` TEXT NOT NULL
        );
        CREATE INDEX IF NOT EXISTS `tmp_backup_archives_backup_idx`
          ON `tmp_backup_archives` (`backup`);
        CREATE INDEX IF NOT EXISTS `tmp_backup_archives_archive_idx`
          ON `tmp_backup_archives` (`archive`);
        ''')
    cursor = db.connection.execute('''
        SELECT `backup`, `json`
        FROM `backups`
        ORDER BY `backup`
        ''')
    for backup, info in cursor.fetchall():
        info = json.loads(info)
        archives = info['archives']
        with db.connection:
            db.connection.executemany('''
                INSERT INTO `tmp_backup_archives`
                  (`backup`, `archive`)
                VALUES
                  (?, ?)
                ''', ((backup, archive) for archive in archives))
    archive_files = None
    # go over backups to remove
    for backup in config.backups:
        # remove backup directory
        print('Removing backup %s' % backup)
        repo.remove('backups/%s' % backup, recurse=True)
        # remove backup from cache
        with db.connection:
            db.connection.execute('''
                DELETE FROM `backups`
                WHERE `backup` = ?
                ''', (backup, ))
            db.connection.execute('''
                DELETE FROM `backup_contents`
                WHERE `backup` = ?
                ''', (backup, ))
        # find archives that are no longer used
        cursor = db.connection.execute('''
            SELECT `archive`
            FROM `tmp_backup_archives`
            WHERE `backup` = ?
              AND `archive` NOT IN (
                SELECT `archive`
                FROM `tmp_backup_archives`
                WHERE `backup` <> ?)
            ''', (backup, backup))
        for archive, in cursor.fetchall():
            print('Removing archive %s' % archive)
            # remove archive files that start with `archive`
            if archive_files is None:
                archive_files = list(repo.listdir('archives'))
            for archive_file in archive_files:
                if os.path.basename(archive_file).startswith(archive + '.'):
                    repo.remove(archive_file)
            # remove archive from cache
            with db.connection:
                db.connection.execute('''
                    DELETE FROM `archive_contents`
                    WHERE `archive` = ?
                    ''', (archive, ))
        # remove backup from the temporary backup-to-archive mapping
        with db.connection:
            db.connection.execute('''
                DELETE FROM `tmp_backup_archives`
                WHERE `backup` = ?
                ''', (backup, ))


if __name__ == '__main__':
    from cmdline import parser
    args = parser.parse_args()
    config = Config()
    for (k, v) in args.__dict__.items():
        if v is not None:
            setattr(config, k, v)
    repo = FileRepository(config.repository)
    if args.command == 'backup':
        backup(config, repo)
    elif args.command == 'set-keys':
        set_keys(config, repo)
    elif args.command == 'backups':
        list_backups(config, repo)
    elif args.command == 'ls':
        list_contents(config, repo)
    elif args.command == 'find':
        find(config, repo)
    elif args.command == 'rm':
        remove_backups(config, repo)
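# Illustrative sketch (not part of the original script): the sub-commands
# dispatched above roughly correspond to invocations like the following
# (option names come from the separate cmdline module and may differ;
# "..." stands for the repository and other options):
#
#     backup.py backup ... /home/user        # create a new backup
#     backup.py backups ...                  # list backups with statistics
#     backup.py ls ... <backup> /home/user   # list files in a backup
#     backup.py find ... 'notes.*'           # find files across backups
#     backup.py rm ... <backup>              # remove a backup and any
#                                            # archives it alone used
#     backup.py set-keys ... <keyid>         # set the GnuPG keys used to
#                                            # encrypt the repository passphrase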