#!/usr/bin/env python

# backup.py - script for efficient backup
#
# Copyright (C) 2015 Arthur de Jong
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# The files produced as output from the software do not automatically fall
# under the copyright of the software, unless explicitly stated otherwise.

import collections
import datetime
import itertools
import json
import os
import os.path
import random
import re
import socket
import sys
import tarfile
import tempfile


class Config(object):

    # maximum size in backed up bytes per archive
    # (size may be smaller or slightly larger due to how files are spread
    # over archives)
    block_size = 10 * 1024 * 1024

    # minimal number of files per archive (set to 0 to disable)
    # (this overrides block_size)
    archive_min_files = 10

    # directory that will contain the local meta-data cache
    cache_dir = None

    # do not use existing archives that have less than this percentage
    # of usable data (relative to block_size and archive total size)
    archive_min_perc = 40

    # use an extractlist for archives that are only effectively used
    # below this percentage
    archive_use_extractlist = 50

    compression = 'gzip'

    encryption = 'gpg'


class TarWriter(object):

    def __init__(self, repo, db, compression, encryption):
        from repo import generate_archive_name
        self.repo = repo
        self.db = db
        self.compression = compression
        self.encryption = encryption
        self.name = generate_archive_name()
        self.fileobj = repo.write_file(
            'archives/%s.tar' % self.name,
            encryption=encryption, compression=compression)
        self.tar = tarfile.open(
            fileobj=self.fileobj, mode='w|', encoding='utf-8')
        self.size = 0
        self.nfiles = 0
        self.directories = set()

    def _add(self, path, size=None, meta=None):
        self.tar.add(path, recursive=False)
        if size is None or meta is None:
            cursor = self.db.connection.execute('''
                SELECT `size`, `meta`
                FROM `crawled`
                WHERE `path` = ?
                ''', (path, ))
            (size, meta) = cursor.fetchone()
        self.db.connection.execute('''
            INSERT INTO `archive_contents`
            (`archive`, `path`, `size`, `meta`)
            VALUES
            (?, ?, ?, ?)
            ''', (self.name, path, size, meta))

    def _add_parents(self, path):
        """Ensure that directory entries are correctly created in the
        archive."""
        if os.path.isdir(path):
            self.directories.add(path)
        parent = os.path.dirname(path)
        if parent not in self.directories:
            if self.db.is_crawled(parent):
                self._add_parents(parent)
                self._add(parent)
            self.directories.add(parent)

    def add(self, path, size, meta):
        """Add the specified path to the archive."""
        self._add_parents(path)
        self._add(path, size, meta)
        self.size += size
        if size > 1:
            self.nfiles += 1

    def _write_metadata(self):
        cursor = self.db.connection.execute('''
            SELECT `meta`
            FROM `archive_contents`
            WHERE `archive` = ?
            ORDER BY `id`
            ''', (self.name, ))
        with self.repo.write_file(
                'archives/%s.json' % self.name,
                encryption=self.encryption,
                compression=self.compression) as f:
            f.write('['.encode('utf-8'))
            first = True
            for meta, in cursor:
                if first:
                    f.write(('\n' + meta).encode('utf-8'))
                else:
                    f.write((',\n' + meta).encode('utf-8'))
                first = False
            f.write('\n]\n'.encode('utf-8'))

    def close(self):
        self.tar.close()
        self.fileobj.close()
        self._write_metadata()


def build_extractlist(db, archive):
    """Build an extractlist for the specified archive. The
    `archive_contents` table is expected to have files to be extracted
    marked as `used` >= 0."""
    from path import parents
    # Some versions of tar recursively extract directories and some don't
    # when a directory is included in the extractlist. This means we should
    # only include directory entries if all files in the archive under that
    # directory should be expanded.
    # select all paths from the archive that we do not want to extract
    cursor = db.connection.execute('''
        SELECT `path`
        FROM `archive_contents`
        WHERE `archive` = ?
          AND `used` < 0
        ''', (archive, ))
    # and get the list of parent directories for those paths
    notextract = set()
    for path, in cursor:
        notextract.update(parents(path))
    # do not include those directories in the extractlist because that
    # would result in extracting unwanted files
    notextract = list(notextract)
    for i in range(0, len(notextract), 500):
        chunk = notextract[i:i + 500]
        db.connection.execute('''
            UPDATE `archive_contents`
            SET `used` = -1
            WHERE `archive` = ?
              AND `path` IN (%s)
            ''' % ', '.join('?' * len(chunk)),
            [archive, ] + chunk)
    # return the list of remaining paths to extract
    cursor = db.connection.execute('''
        SELECT `path`
        FROM `archive_contents`
        WHERE `archive` = ?
          AND `used` >= 0
        ORDER BY `id`
        ''', (archive, ))
    return (path for path, in cursor)


def select_archives(config, db):
    """Return a list of existing archives that can be re-used for this
    backup run."""
    # clear archive usage
    with db.connection:
        db.connection.execute('''
            UPDATE `archive_contents`
            SET `used` = -1
            ''')
    # set used based on information in table
    with db.connection:
        db.connection.execute('''
            UPDATE `archive_contents`
            SET `used` = `size`
            WHERE `meta` IN (SELECT `meta` FROM `crawled`)
            ''')
    # find archives that contain at least archive_min_perc% useful files
    cursor = db.connection.execute('''
        SELECT `archive`,
               SUM(CASE WHEN `used` >= 0 THEN `used` ELSE 0 END) AS `totused`,
               SUM(`size`) AS `totsize`
        FROM `archive_contents`
        GROUP BY `archive`
        HAVING `totused` > ?
           AND (`totused` * 100) > (`totsize` * ?)
        ORDER BY `archive` ASC
        ''', (int((config.block_size * config.archive_min_perc) / 100),
              config.archive_min_perc))
    used_archives = []
    for archive, used, size in cursor:
        fill = 100 * used // size
        use_extractlist = False
        # if the percentage used is low enough or if extracting the full
        # archive will cause problems, use an extractlist
        if (fill < config.archive_use_extractlist and
                not config.no_extractlists):
            use_extractlist = True
        else:
            # check if the archive contains files that should not be extracted
            # for this backup (in this case an extractlist is mandatory)
            cursor = db.connection.execute('''
                SELECT 1
                FROM `archive_contents`
                WHERE `archive` = ?
                  AND `path` NOT IN (
                      SELECT `path`
                      FROM `crawled`)
                LIMIT 1
                ''', (archive, ))
            if bool(list(cursor)):
                use_extractlist = True
        if not use_extractlist or not config.no_extractlists:
            extractlist = None
            if use_extractlist:
                extractlist = list(build_extractlist(db, archive))
            print('Will use %3s%% (%s/%s) of %s' % (
                fill, used, size, archive))
            used_archives.append((archive, extractlist))
    return used_archives


def update_dumped(db, used_archives):
    """Mark crawled files as dumped based on the provided archives."""
    # go over the archives and reconstruct the to be restored tree so we can
    # check for any crawled files that remain to be dumped
    # (this can happen with files extracted from the extractlist before their
    # directory entries are present or when a later used archive includes an
    # incorrect file)
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `tmp_restored` (
            `path` TEXT PRIMARY KEY,
            `meta` TEXT NOT NULL
        );
        ''')
    for archive, extractlist in used_archives:
        with db.connection:
            if not extractlist:
                extractlist = []
                db.connection.execute('''
                    INSERT OR REPLACE INTO `tmp_restored`
                    (`path`, `meta`)
                    SELECT `path`, `meta`
                    FROM `archive_contents`
                    WHERE `archive` = ?
                    ''', (archive, ))
            else:
                # TODO: if extractlist contains a directory, it should be
                # restored recursively
                for i in range(0, len(extractlist), 500):
                    chunk = extractlist[i:i + 500]
                    db.connection.execute('''
                        INSERT OR REPLACE INTO `tmp_restored`
                        (`path`, `meta`)
                        SELECT `path`, `meta`
                        FROM `archive_contents`
                        WHERE `archive` = ?
                          AND `path` IN (%s)
                        ''' % ', '.join('?' * len(chunk)),
                        [archive, ] + chunk)
                # any parent directory timestamp will be updated when
                # extracting
                dirs = set(os.path.dirname(path) for path in extractlist)
                db.connection.executemany('''
                    INSERT OR REPLACE INTO `tmp_restored`
                    (`path`, `meta`)
                    VALUES
                    (?, '')
                    ''', ((path, ) for path in dirs))
    # update crawled table
    with db.connection:
        db.connection.execute('''
            UPDATE `crawled`
            SET `dumped` = 1
            WHERE `meta` IN (
                SELECT `meta`
                FROM `tmp_restored`)
            ''')
    # drop restored table
    db.connection.executescript('''
        DROP TABLE `tmp_restored`;
        ''')


def create_archives(config, repo, db, compression, encryption, used_archives):
    """Create any archives for crawled, undumped files."""
    # get the files that have not yet been marked as part of an archive
    cursor = db.connection.execute('''
        SELECT `path`, `size`, `meta`
        FROM `crawled`
        WHERE `dumped` = 0
        ORDER BY `id`
        ''')
    archive = None
    # use fetchall because SQLite cannot handle partial reads from a cursor
    # if the database is being modified in another cursor
    for path, size, meta in cursor.fetchall():
        if (archive and archive.nfiles >= config.archive_min_files and
                archive.size + size >= config.block_size):
            archive.close()
            used_archives.append((archive.name, None))
            archive = None
        if archive is None:
            archive = TarWriter(repo, db, compression, encryption)
            print('Creating %s' % archive.name)
        try:
            archive.add(path, size, meta)
        except (EnvironmentError, OSError):
            import traceback
            print(traceback.format_exc())
    if archive:
        archive.close()
        used_archives.append((archive.name, None))


def write_restore_sh(fp, repo, backup, used_archives):
    """Write a restore.sh script for the given backup to the file object
    fp."""
    import textwrap
    fp.write(textwrap.dedent('''
        #!/bin/sh
        # restore.sh - script to do a full restore from backup %s
        umask 077
        dir=`dirname "$0"`
        REPO=`cd "$dir/../.." && pwd`
        EXTMPDIR="/tmp/extract.$$"
        mkdir "$EXTMPDIR" || exit 1
        ''' % backup).lstrip().encode('utf-8'))

    def x(filename):
        filename, filters = repo.expand(filename)
        out = []
        for n, f in enumerate(filters):
            if n == 0:
                out.append('%s < "$REPO"/%s' % (f.restore_cmd, filename))
            else:
                out.append(' | %s' % f.restore_cmd)
        return ''.join(out)

    # decrypt the passphrase file if it is there
    if repo.backend.exists(repo.expand('keys/passphrase')[0]):
        from filters import GnuPGKeyEncryption
        # TODO: if supporting multiple keys decrypt any keys that
        # are used by archives
        filename, filters = repo.expand('keys/passphrase')
        fp.write(textwrap.dedent('''
            # decrypt passphrase
            %s < "$REPO"/%s > "$EXTMPDIR"/passphrase
            ''' % (
                GnuPGKeyEncryption.restore_cmd,
                filename)).lstrip().encode('utf-8'))
    fp.write('# unpack archives\n'.encode('utf-8'))
    for archive, extractlist in used_archives:
        if extractlist:
            # write code for extracting the extractlist
            fp.write(
                ('%s > "$EXTMPDIR"/%s.list\n' % (
                    x('backups/%s/%s.list' % (backup, archive)),
                    archive)).encode('utf-8'))
            # write code for unpacking with extractlist
            fp.write(
                ('%s | tar -xpf - -T "$EXTMPDIR"/%s.list\n' % (
                    x('archives/%s.tar' % archive),
                    archive)).encode('utf-8'))
        else:
            fp.write(
                ('%s | tar -xpf -\n' %
                 x('archives/%s.tar' % archive)).encode('utf-8'))
    fp.write(textwrap.dedent('''
        # clean up
        rm -rf "$EXTMPDIR"
        exit 0
        ''').encode('utf-8'))


# TODO: perhaps write a detached signature for restore.sh
# (write restore.sh to cache first, then sign, then to repository)


# write metadata-dump for backup (aka snapshot, aka generation)
def backup(config, repo):
    """Create a new backup in the repository."""
    from cache import MetaData
    from crawler import crawl
    from filters import (
        GzipCompression, Bzip2Compression, XZCompression,
        GnuPGEncryption, NoEncryption)
    from repo import generate_backup_name
    # configure compression and encryption
    if config.compression == 'none':
        compression = NoEncryption()
    elif config.compression == 'bzip2':
        compression = Bzip2Compression()
    elif config.compression == 'xz':
        compression = XZCompression()
    else:
        compression = GzipCompression()
    if config.encryption == 'none':
        encryption = NoEncryption()
    else:
        encryption = GnuPGEncryption(repo)
    # check existence of passphrase
    if repo.backend.exists(repo.expand('keys/passphrase')[0]):
        try:
            repo.get_passphrase()
        except EnvironmentError:
            print('Use set-keys first')
            return
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    db.resync_archives(repo)
    # prepare the name of the backup
    backup = generate_backup_name()
    # TODO: or have some way to specify it on the command-line
    try:
        from dateutil.tz import tzlocal
        start_time = datetime.datetime.now(tzlocal())
    except ImportError:
        start_time = datetime.datetime.now()
    # build a list of excludes
    excludes = getattr(config, 'excludes', [])
    for excludefile in getattr(config, 'exclude_files', ()):
        with open(excludefile, 'r') as f:
            for line in f:
                line = line.rstrip('\n\r')
                if not line or line.startswith(';') or line.startswith('#'):
                    continue
                excludes.append(line)
    # fill the crawled table with information from the filesystem
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `crawled` (
            `id` INTEGER PRIMARY KEY,
            `path` TEXT NOT NULL,
            `size` INTEGER NOT NULL,
            `meta` TEXT NOT NULL,
            `dumped` BOOLEAN NOT NULL DEFAULT 0
        );
        ''')
    # add crawled files to the database
    with db.connection:
        db.connection.executemany('''
            INSERT INTO `crawled`
            (`path`, `size`, `meta`)
            VALUES
            (?, ?, ?)
            ''', (
                (meta['path'], meta['size'], json.dumps(meta, sort_keys=True))
                for meta in crawl(config.files, excludes)))
    # create indexes after crawling
    db.connection.executescript('''
        CREATE INDEX IF NOT EXISTS `crawled_path_idx` ON `crawled` (`path`);
        CREATE INDEX IF NOT EXISTS `crawled_meta_idx` ON `crawled` (`meta`);
        CREATE INDEX IF NOT EXISTS `crawled_dumped_idx` ON `crawled` (`dumped`);
        ''')
    # find archives with optional extractlists that will form the basis
    # on top of which we will create additional archives
    with db.connection:
        used_archives = select_archives(config, db)
    # mark crawled files as dumped based on the provided archives
    update_dumped(db, used_archives)
    # create new archives of as of yet undumped files
    # (this appends to used_archives)
    with db.connection:
        create_archives(
            config, repo, db, compression, encryption, used_archives)
    # write files list for this backup
    cursor = db.connection.execute('''
        SELECT `meta`
        FROM `crawled`
        ORDER BY `id`
        ''')
    with repo.write_file(
            'backups/%s/files.json' % backup,
            encryption=encryption, compression=compression) as f:
        f.write('['.encode('utf-8'))
        first = True
        for meta, in cursor:
            if first:
                f.write(('\n' + meta).encode('utf-8'))
            else:
                f.write((',\n' + meta).encode('utf-8'))
            first = False
        f.write('\n]\n'.encode('utf-8'))
    # write out extractlists
    for archive, extractlist in used_archives:
        if extractlist:
            with repo.write_file(
                    'backups/%s/%s.list' % (backup, archive),
                    encryption=encryption, compression=compression) as f:
                for path in extractlist:
                    f.write(('%s\n' % path.lstrip('/')).encode('utf-8'))
    # create restore script and backup files
    with repo.write_file(
            'backups/%s/restore.sh' % backup, executable=True) as f:
        write_restore_sh(f, repo, backup, used_archives)
    # get statistics from crawled
    files, size = db.connection.execute('''
        SELECT COUNT(*), SUM(`size`)
        FROM `crawled`
        ''').fetchone()
    # write meta-data
    try:
        from dateutil.tz import tzlocal
        end_time = datetime.datetime.now(tzlocal())
    except ImportError:
        end_time = datetime.datetime.now()
    info = dict(
        backup=backup,
        start_time=start_time.isoformat(),
        end_time=end_time.isoformat(),
        files=files,
        size=size,
        hostname=socket.gethostname(),
        paths=config.files,
        archives=[archive for archive, extractlist in used_archives],
        extractlists=[
            archive for archive, extractlist in used_archives
            if extractlist],
    )
    with repo.write_file(
            'backups/%s/info.json' % backup,
            encryption=encryption, compression=compression) as f:
        f.write(json.dumps(info, indent=1).encode('utf-8'))
    # store backup information in local cache
    with db.connection:
        db.connection.execute('''
            INSERT OR REPLACE INTO `backups`
            (`backup`, `json`)
            VALUES
            (?, ?)
            ''', (backup, json.dumps(info, sort_keys=True)))
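

# For reference, the repository layout written by backup() above and read by
# restore() and remove_backups() below; the paths are taken from the
# write_file()/read_file() calls in this module and the descriptions are
# informal:
#
#   archives/<name>.tar            tar archive with file data
#   archives/<name>.json           meta-data of the files in the archive
#   backups/<backup>/files.json    meta-data of all files in the backup
#   backups/<backup>/info.json     backup statistics and the archives it uses
#   backups/<backup>/<name>.list   extractlist for a partially used archive
#   backups/<backup>/restore.sh    stand-alone script for a full restore
#   keys/passphrase                encrypted passphrase written by set_keys()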


def set_keys(config, repo):
    """Store the repository passphrase encrypted to the keys in
    config.keys."""
    passphrase = repo.get_or_create_passphrase()
    # TODO: read existing keys from the REPO and add config.keys
    repo.write_passphrase(passphrase, config.keys)


def list_backups(config, repo):
    """List backups in the repository and print information."""
    from cache import MetaData
    import dateutil.parser
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    db.resync_backups(repo)
    # list backups
    cursor = db.connection.execute('''
        SELECT `backup`, `json`
        FROM `backups`
        ORDER BY `backup`
        ''')
    for backup, info in cursor:
        info = json.loads(info)
        start_time = info.get('start_time', '')
        if start_time:
            start_time = dateutil.parser.parse(start_time)
            start_time = start_time.strftime('%Y-%m-%d %H:%M:%S %z').strip()
        end_time = info.get('end_time', '')
        if end_time:
            end_time = dateutil.parser.parse(end_time)
            end_time = end_time.strftime('%Y-%m-%d %H:%M:%S %z').strip()
        print('%s %s .. %s' % (
            os.path.basename(backup), start_time, end_time))
        hostname = info.get('hostname')
        for path in info.get('paths', ()):
            print(' %s%s%s' % (hostname, ':' if hostname else '', path))
        extra = []
        if 'files' in info:
            extra.append('%d files' % info['files'])
        if 'size' in info:
            extra.append('%d bytes' % info['size'])
        if 'archives' in info:
            extra.append('%d archives used' % len(info['archives']))
        if extra:
            print(' %s' % ', '.join(extra))


def _pattern_match(meta, patterns):
    return not patterns or \
        meta['path'] in patterns or \
        any(meta['path'].startswith(pattern + '/') for pattern in patterns)


def list_contents(config, repo):
    """List backup contents and print file listing."""
    from ls import ls_format
    patterns = [pattern.rstrip('/') for pattern in config.files]
    print('%s:' % config.backup)
    for meta in repo.read_fileslist('backups/%s/files.json' % config.backup):
        if _pattern_match(meta, patterns):
            ls_format(meta)


def find(config, repo):
    """Find archives containing the files."""
    from ls import ls_format
    from path import pattern2re
    patterns = [
        (pattern2re(pattern.rstrip('/')), pattern.endswith('/'))
        for pattern in config.files]
    for backup in repo.list_backups():
        print('%s:' % backup)
        for meta in repo.read_fileslist('backups/%s/files.json' % backup):
            if any(pattern.match(meta['path']) and (
                       (not is_dir) or (meta['type'] == 'D'))
                   for pattern, is_dir in patterns):
                ls_format(meta)


def restore(config, repo):
    """Restore files from a backup in the repository."""
    from cache import MetaData
    from filters import GnuPGKeyEncryption, Reader
    repo.keyencryption = GnuPGKeyEncryption()
    backup = config.backup
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    db.resync_backups(repo)
    db.resync_archives(repo)
    # get list of needed archives
    cursor = db.connection.execute('''
        SELECT `json`
        FROM `backups`
        WHERE `backup` = ?
        ''', (backup, ))
    info = json.loads(cursor.fetchone()[0])
    archives = info['archives']
    # build a list of files to restore
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `tmp_torestore` (
            `meta` TEXT NOT NULL
        );
        ''')
    patterns = [pattern.rstrip('/') for pattern in config.files]
    print('%s: reading files list' % config.backup)
    fileslist = repo.read_fileslist('backups/%s/files.json' % backup)
    with db.connection:
        db.connection.executemany('''
            INSERT INTO `tmp_torestore`
            (`meta`)
            VALUES
            (?)
            ''', (
                (json.dumps(meta, sort_keys=True),)
                for meta in fileslist
                if _pattern_match(meta, patterns)))
    db.connection.executescript('''
        CREATE INDEX IF NOT EXISTS `tmp_torestore_meta_idx`
            ON `tmp_torestore` (`meta`);
        ''')
    # go over the list of archives
    for archive in archives:
        # fill used column for archive
        db.connection.execute('''
            UPDATE `archive_contents`
            SET `used` = -1
            WHERE `archive` = ?
            ''', (archive, ))
        db.connection.execute('''
            UPDATE `archive_contents`
            SET `used` = `size`
            WHERE `archive` = ?
              AND `meta` IN (SELECT `meta` FROM `tmp_torestore`)
            ''', (archive, ))
        # count files in the archive that can be extracted
        cursor = db.connection.execute('''
            SELECT COUNT(*), COUNT(`tmp_torestore`.`meta`)
            FROM `archive_contents`
            LEFT JOIN `tmp_torestore`
              ON `archive_contents`.`meta` = `tmp_torestore`.`meta`
            WHERE `archive` = ?
            ''', (archive, ))
        files, extract = cursor.fetchone()
        # TODO: the above could probably be done more efficiently
        if extract == 0:
            # skip this archive, nothing interesting here
            continue
        # open the archive
        tarfp = repo.read_file('archives/%s.tar' % archive)
        if files == extract:
            # we can extract the complete archive
            print('Extracting %s' % archive)
            with Reader(['tar', '-xpf', '-'], tarfp) as f:
                f.read()
        else:
            # build an extractlist
            print('Extracting %s%% of %s' % (
                (100 * extract) // files, archive))
            # write the extractlist to a temporary file
            fd, extractfile = tempfile.mkstemp()
            f = os.fdopen(fd, 'w')
            for path in build_extractlist(db, archive):
                f.write('%s\n' % path.lstrip('/'))
            f.close()
            # extract the archive using the extractlist
            with Reader(
                    ['tar', '-xpf', '-', '-T', extractfile], tarfp) as f:
                f.read()
            os.remove(extractfile)


def remove_backups(config, repo):
    """Remove backups from the repository. Also removes the unused
    archives."""
    import dateutil.parser
    from cache import MetaData
    # check command-line arguments
    backups = config.backups or []
    # bring metadata cache in sync with repository
    db = MetaData(config.cache_dir, repo.uuid)
    db.resync_backups(repo)
    if config.keeps:
        # turn keep policy into time period -> count mapping
        # (e.g. 7d,4w keeps a backup for each of the last seven days and
        # four weeks that have backups)
        keep_count = collections.defaultdict(int)
        for value in (','.join(config.keeps)).split(','):
            m = re.match(r'^([0-9]+)([hdwmy])$', value)
            keep_count[m.group(2)] = int(m.group(1))
        # set up storage for seen archives
        seen = collections.defaultdict(list)
        # this is not ideal yet
        # because it will keep the last backups instead of the oldest
        # it also depends on the order of the backups in the database
        # (order by name)
        # go over the backups in the repository
        cursor = db.connection.execute('''
            SELECT `backup`, `json`
            FROM `backups`
            ORDER BY `backup`
            ''')
        for backup, info in cursor:
            info = json.loads(info)
            start_time = info.get('start_time', '')
            start_time = dateutil.parser.parse(start_time)
            # for each of the different policy classes, determine the
            # unique value
            values = dict(
                h=start_time.strftime('%Y-%m-%d %H %z').strip(),
                d=start_time.strftime('%Y-%m-%d'),
                w='%s-W%02d' % start_time.isocalendar()[:2],
                m=start_time.strftime('%Y-%m'),
                y=start_time.strftime('%Y'))
            # for each of the policy classes record the backup for that
            # value in that class
            for pol, value in values.items():
                see = seen[pol]
                if value not in [x for x, y in see]:
                    see.append((value, backup))
        # limit seen by number to keep and figure out which backups to keep
        keep_backups = set()
        for pol, see in seen.items():
            # keep the last keep_count backups
            if keep_count[pol]:
                for value, backup in see[-keep_count[pol]:]:
                    keep_backups.add(backup)
        # build list of backups to remove
        cursor = db.connection.execute('''
            SELECT `backup`
            FROM `backups`
            ''')
        backups = [backup for backup, in cursor
                   if backup not in keep_backups]
    # handle -n, --no-act to only print what would be removed
    if not backups:
        print('Nothing to remove')
        return
    if config.no_act:
        for backup in backups:
            print('Would remove backup %s' % backup)
        return
    # build a mapping between backups and archives
    db.connection.executescript('''
        CREATE TEMPORARY TABLE `tmp_backup_archives` (
            `backup` TEXT NOT NULL,
            `archive` TEXT NOT NULL
        );
        CREATE INDEX IF NOT EXISTS `tmp_backup_archives_backup_idx`
            ON `tmp_backup_archives` (`backup`);
        CREATE INDEX IF NOT EXISTS `tmp_backup_archives_archive_idx`
            ON `tmp_backup_archives` (`archive`);
        ''')
    cursor = db.connection.execute('''
        SELECT `backup`, `json`
        FROM `backups`
        ORDER BY `backup`
        ''')
    for backup, info in cursor.fetchall():
        info = json.loads(info)
        archives = info['archives']
        with db.connection:
            db.connection.executemany('''
                INSERT INTO `tmp_backup_archives`
                (`backup`, `archive`)
                VALUES
                (?, ?)
                ''', ((backup, archive) for archive in archives))
    # go over backups to remove
    for backup in backups:
        # remove backup directory
        print('Removing backup %s' % backup)
        repo.remove_backup(backup)
        # remove backup from cache
        with db.connection:
            db.connection.execute('''
                DELETE FROM `backups`
                WHERE `backup` = ?
                ''', (backup, ))
        # find archives that are no longer used
        cursor = db.connection.execute('''
            SELECT `archive`
            FROM `tmp_backup_archives`
            WHERE `backup` = ?
              AND `archive` NOT IN (
                  SELECT `archive`
                  FROM `tmp_backup_archives`
                  WHERE `backup` <> ?)
            ''', (backup, backup))
        for archive, in cursor.fetchall():
            print('Removing archive %s' % archive)
            repo.remove_archive(archive)
            # remove archive from cache
            with db.connection:
                db.connection.execute('''
                    DELETE FROM `archive_contents`
                    WHERE `archive` = ?
                    ''', (archive, ))
        # remove backup from mapping cache
        with db.connection:
            db.connection.execute('''
                DELETE FROM `tmp_backup_archives`
                WHERE `backup` = ?
                ''', (backup, ))


if __name__ == '__main__':
    from cmdline import parser
    from repo import Repository
    args = parser.parse_args()
    config = Config()
    for (k, v) in args.__dict__.items():
        if v is not None:
            setattr(config, k, v)
    repo = Repository(config.repository)
    if args.command == 'backup':
        backup(config, repo)
    elif args.command == 'set-keys':
        set_keys(config, repo)
    elif args.command == 'backups':
        list_backups(config, repo)
    elif args.command == 'ls':
        list_contents(config, repo)
    elif args.command == 'find':
        find(config, repo)
    elif args.command == 'restore':
        restore(config, repo)
    elif args.command == 'rm':
        if bool(config.backups) == bool(config.keeps):
            parser.error('Either specify --keep or BACKUP')
        remove_backups(config, repo)
    elif args.command == 'fsck':
        from fsck import fsck
        fsck(config, repo)
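

# The command-line interface itself is defined in cmdline.py (not included in
# this file); the dispatch above handles the following args.command values.
# The one-line descriptions below are inferred from the handler functions and
# may not match the parser's actual help text:
#
#   backup    create a new backup of the configured paths
#   set-keys  store the repository passphrase encrypted to the given keys
#   backups   list the backups present in the repository
#   ls        list the contents of a backup
#   find      search backups for files matching a pattern
#   restore   restore files from a backup
#   rm        remove backups (named explicitly or selected by a --keep policy)
#   fsck      check repository consistency (implemented in fsck.py)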