"""ZASD - ZFS Automatic Snapshot Daemon."""
from sys import stdout, stderr
import signal
import time
from functools import partial, reduce
from itertools import islice
import asyncio
from subprocess import run, PIPE
from datetime import datetime, timezone, timedelta
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from zasd.apscheduler import *
from zasd.asyncio import *
from zasd.config import *
from zasd.zfs import *
from zasd.fs import *
from zasd.log import *
class ZASD():
    """Main daemon object.

    Wires up signal handlers, loads configuration, registers snapshot
    creation/destruction jobs with the priority scheduler, starts an idle
    spinner task, and then runs the asyncio event loop until stopped.
    """

    def __init__(self):
        self.event_loop = asyncio.get_event_loop()
        # Stop cleanly both on Ctrl-C and on service-manager shutdown.
        self.event_loop.add_signal_handler(signal.SIGINT,
            partial(self.signal_handler, 'SIGINT'))
        self.event_loop.add_signal_handler(signal.SIGTERM,
            partial(self.signal_handler, 'SIGTERM'))

        global config
        config = load_config()
        configure_logging()

        self.zfs = ZFS(config['zfs_path'])

        log.info('Processing jobs')

        # Load and activate snapshot schedules
        self.scheduler = AsyncIOPriorityScheduler(
            event_loop=self.event_loop,
            executors={'default': AsyncIOPriorityExecutor()})
        self.load_schedules()
        self.scheduler.start()

        spinner = Spinner()
        # BUG FIX: create_task() requires a coroutine object; the original
        # passed the bound method 'spinner.spin' without calling it.
        self.event_loop.create_task(spinner.spin())

        try:
            self.event_loop.run_forever()
        finally:
            log.info('Terminating')
            print(file=stderr)
            self.event_loop.close()

    def signal_handler(self, signame):
        """Log the received signal name and stop the event loop."""
        log.info('Received %s', signame)
        self.event_loop.stop()

    def load_schedules(self):
        """Register one creation job per (filesystem, schedule) pair and a
        single periodic snapshot-destruction job."""
        for schedule in self.schedules():
            if schedule['disabled']:
                continue

            tag = schedule['tag']
            for fs in schedule['filesystems']:
                # BUG FIX: the original wrapped the task in a zero-argument
                # lambda ('lambda: self.snapshot_creation_task') that only
                # *returned* the coroutine function; the scheduler's
                # args=[schedule, fs] could never be applied. Pass the bound
                # coroutine method directly so args reach it.
                self.scheduler.add_job(self.snapshot_creation_task,
                    trigger=schedule['trigger'],
                    id='{}:{}'.format(fs, tag),
                    group=fs,
                    priority=schedule['priority'],
                    args=[schedule, fs])

            # Set tag-modified flags on filesystems (always take snapshots on startup)
            for name in schedule['filesystems']:
                filesystem = self.zfs.filesystems(name)
                filesystem.modified(tag, True)

        self.scheduler.add_job(self.snapshot_destruction_task,
            trigger=config['destroy_trigger'],
            id='destroy',
            group='destroy')

    def schedules(self):
        """Yield every configured schedule merged over the config defaults
        (schedule keys win over default keys)."""
        return ({**config['defaults'], **dict(s)}
                for s in config['schedules'])

    #
    # Snapshot scheduling functions
    #

    async def snapshot_creation_task(self, schedule, fs):
        """Take a snapshot of *fs* for *schedule*.

        Skipped when the schedule is modification-gated ('if_modified') and
        the filesystem reports no modification for this tag.
        """
        tag = schedule['tag']
        recursive = schedule['recursive']

        # NOTE(review): 'was_modifed' looks like a typo for 'was_modified',
        # but the spelling must match the filesystem object's actual API in
        # zasd.zfs — confirm before renaming.
        if not schedule['if_modified'] or fs.was_modifed():
            # Clear tag-modified flags for this tag on filesystem
            fs.clear_modified(tag)

            log.info('Taking snapshot of filesystem %s on schedule %s', fs, tag)

            # Create stub snapshot record and take the snapshot
            snapshot = dict(dataset=fs, tag=tag)
            await self.zfs.create_snapshot(snapshot, recursive=recursive)

    async def snapshot_destruction_task(self):
        """Destroy every snapshot that has aged past its schedule's
        'keep' count."""
        snapshots = self.zfs.snapshots()
        for schedule in self.schedules():
            if schedule['disabled']:
                continue

            # Find expired snapshots for schedule
            tag = schedule['tag']
            expired = slice_snapshots(snapshots, tag,
                index=schedule['keep'], stop=None, reverse=True)

            if len(expired) > 0:
                # BUG FIX: log message said 'snapsnots'.
                log.info('Destroying snapshots with tag %s:', tag)
                for snapshot in expired:
                    log.info('%s%s',
                        config['tab_size'] * ' ', snapshot['name'])
                    await self.zfs.destroy_snapshot(snapshot)
# Class for printing idle spinner
class Spinner():
    """Prints a rotating ASCII spinner to stderr, one frame per second."""

    # Animation frames, cycled in order.
    CHARS = ['|', '/', '-', '\\']
    # Index of the next frame to draw.
    counter = 0

    async def spin(self):
        """Animate forever; intended to run as an asyncio task.

        BUG FIX: declared 'async def' — the original used a plain 'def',
        which makes the 'await asyncio.sleep(1)' below a SyntaxError.
        """
        while True:
            # '\x1B[G' moves the cursor back to column 1, so the next
            # frame overwrites the current one in place.
            print(self.CHARS[self.counter] + '\x1B[G',
                  end='', file=stderr, flush=True)
            self.counter = (self.counter + 1) % len(self.CHARS)
            await asyncio.sleep(1)