Compare commits

...

6 Commits

  1. README.md (19)
  2. docs/zasd.example.service (12)
  3. src/zasd.conf.example.py (10)
  4. src/zasd.py (40)
  5. src/zasd/config.py (1)

@@ -5,12 +5,14 @@ snapshots of ZFS datasets (pools, filesystems) according to a set of
configurable schedules. Options for interval and crontab-style
schedules are available.
The daemon only includes an installer for macOS at the moment,
but a multi-platform installer is coming soon.
The daemon includes an installer for macOS `launchd`. For Linux,
see the example `systemd` unit file in `docs`. Modify it as needed,
copy it to `/etc/systemd/system`, then run `systemctl start zasd`
and `systemctl enable zasd` as root to have it start on boot.
As of now, snapshots are conditional. A snapshot will only be taken
if a dataset has been modified since the last snapshot. An option
to turn this off per schedule is coming soon.
Snapshots may be made conditional with the `if_modified` option
on each schedule. In this mode, a snapshot will only be taken if
a dataset has been modified since the last snapshot.
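For illustration, a schedule with the option switched on might look like this in `zasd.conf.py`. This is only a sketch: the key names mirror the `defaults` block shown further down in this diff, but declaring schedules as a plain list here is an assumption, and the values are made up.

```python
# Hypothetical zasd.conf.py excerpt: snapshot 'tank' every 12 hours,
# but only when something was modified since the last snapshot.
# The 'schedules' list itself is an assumption; key names follow the
# defaults shown in src/zasd/config.py below.
from apscheduler.triggers.interval import IntervalTrigger

schedules = [
    dict(
        filesystems = ['tank'],
        tag         = 'zasd',
        trigger     = IntervalTrigger(hours=12),
        recursive   = True,
        if_modified = True,   # skip the snapshot when nothing changed
    ),
]
```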
The daemon should not interfere with other snapshots you have
on the system, unless you give them names that adhere to the
@@ -20,4 +22,9 @@ using similar syntax for something else, you may need to change
the `separator` setting in `zasd.conf.py`.
**DISCLAIMER:** This program is still in beta. Use at your own
risk. It works fine on my system, but your mileage may vary.
## Requirements
* `apscheduler` pip package
* `fswatch` program

@@ -0,0 +1,12 @@
[Unit]
Description=ZFS Automatic Snapshot Daemon

[Service]
User=root
WorkingDirectory=/root/zasd/src
ExecStart=/usr/bin/python3 zasd.py
Restart=always
SyslogIdentifier=zasd

[Install]
WantedBy=multi-user.target

@@ -1,5 +1,5 @@
#zfs_path = '/usr/local/bin/zfs'
#fswatch_path = '/usr/local/bin/fswatch'
#zfs_path = '/sbin/zfs'
#fswatch_path = '/usr/bin/fswatch'
# Tab size for indented log messages
#tab_size = 2
@@ -34,6 +34,12 @@ defaults = dict(
# Whether to ignore triggers on filesystems whose files
# have not been modified since the trigger was last fired
# Enabling this for any filesystem will cause an instance
# of fswatch to be launched for every ZFS filesystem. This
# may use a lot of open file descriptors, so consider
# avoiding it if you have large filesystems with many
# folders. Taking snapshots unconditionally is probably
# more reliable.
# if_modified = False,
# Priority for snapshot; if more than one trigger fires

@@ -121,10 +121,12 @@ def get_snapshot_zfs_name(snapshot):
# Generate ZFS identifier string from arguments
def make_snapshot_zfs_name(dataset, tag_or_snapshot, serial=None):
    global config
    if serial is None:
        return '{}@{}'.format(dataset, tag_or_snapshot)
    else:
        return '{}@{}:{}'.format(dataset, tag_or_snapshot, serial)
        return '{}@{}{}{}'.format(dataset, tag_or_snapshot, config['separator'], serial)
#
# Configuration functions
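For illustration, the old and new format strings above produce names like the following. This is a standalone sketch; the tag, the serial value, and the `:` separator are assumptions, with the separator configurable via the `separator` setting mentioned in the README.

```python
# Standalone sketch of the snapshot naming scheme; the tag, serial and
# ':' separator below are illustrative assumptions.
config = {'separator': ':'}
dataset, tag, serial = 'tank', 'zasd', '1589657054'

print('{}@{}'.format(dataset, tag))                                   # tank@zasd
print('{}@{}{}{}'.format(dataset, tag, config['separator'], serial))  # tank@zasd:1589657054
```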
@@ -199,7 +201,7 @@ async def snapshot_creation_task(schedule, fs):
    serial = make_snapshot_serial()
    recursive = schedule['recursive']
    if get_fs_flag(fs, tag):
    if not schedule['if_modified'] or get_fs_flag(fs, tag):
        # Clear tag-modified flags for this tag on filesystem
        clear_fs_flag(fs, tag)
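Spelled out, the new condition means: unconditional schedules always take the snapshot, while `if_modified` schedules only do so when the tag-modified flag is set. Below is a small self-contained sketch of that logic, with `get_fs_flag` stubbed; the real one checks the per-tag modified flag maintained from the file system watcher.

```python
# Stubbed sketch of the decision above; the real get_fs_flag checks the
# tag-modified flag kept by the daemon, not a plain dict.
def get_fs_flag(fs, tag):
    return fs['modified']

def should_snapshot(schedule, fs, tag):
    if schedule['if_modified']:
        return get_fs_flag(fs, tag)  # conditional: only when files changed
    return True                      # unconditional: always snapshot

fs = {'modified': False}
print(should_snapshot({'if_modified': True}, fs, 'zasd'))   # False
print(should_snapshot({'if_modified': False}, fs, 'zasd'))  # True
```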
@@ -239,13 +241,21 @@ def snapshot_matches_tag(snapshot, tag):
# Get tag of snapshot
def get_snapshot_tag(snapshot):
    (tag, serial) = get_snapshot_fields(snapshot)
    return tag
    fields = get_snapshot_fields(snapshot)
    if len(fields) == 2:
        (tag, serial) = fields
        return tag
    else:
        return ''

# Get serial number of snapshot
def get_snapshot_serial(snapshot):
    (tag, serial) = get_snapshot_fields(snapshot)
    return serial
    fields = get_snapshot_fields(snapshot)
    if len(fields) == 2:
        (tag, serial) = fields
        return serial
    else:
        return ''

# Get tuple of fields in snapshot name
def get_snapshot_fields(snapshot):
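To illustrate the new behaviour, here is a self-contained sketch of these getters with a stand-in `get_snapshot_fields` that splits the name on the separator. The real implementation is not shown in this hunk, so the stand-in and the `:` separator are assumptions, and for simplicity the sketch passes plain name strings rather than the snapshot records the daemon uses.

```python
SEPARATOR = ':'   # assumed default; configurable via 'separator' in zasd.conf.py

# Stand-in for the real get_snapshot_fields: take the part after '@'
# and split it on the separator.
def get_snapshot_fields(name):
    return tuple(name.split('@', 1)[1].split(SEPARATOR))

def get_snapshot_tag(name):
    fields = get_snapshot_fields(name)
    return fields[0] if len(fields) == 2 else ''

def get_snapshot_serial(name):
    fields = get_snapshot_fields(name)
    return fields[1] if len(fields) == 2 else ''

print(get_snapshot_tag('tank@zasd:1589657054'))     # zasd
print(get_snapshot_serial('tank@zasd:1589657054'))  # 1589657054
print(get_snapshot_tag('tank@manual-backup'))       # '' (not a zasd-managed name)
```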
@@ -302,18 +312,24 @@ def load_snapshot_schedules():
async def main_task():
    global config, event_loop, scheduler, fs_listeners
    scheduler = AsyncIOPriorityScheduler(
        event_loop = event_loop,
        executors = {'default': AsyncIOPriorityExecutor()})
    monitor = False
    for schedule in get_schedules():
        if schedule['if_modified']:
            monitor = True
    # Watch file system mountpoints
    fs_listeners = dict()
    for fs in zfs_get_filesystems():
        await event_loop.subprocess_exec(
            lambda: FSWatchProtocol(fs), config['fswatch_path'], '-o', fs['mountpoint'], stdout=PIPE)
    if monitor:
        logger.info('Starting file system watcher')
        fs_listeners = dict()
        for fs in zfs_get_filesystems():
            await event_loop.subprocess_exec(
                lambda: FSWatchProtocol(fs), config['fswatch_path'], '-o', fs['mountpoint'], stdout=PIPE)
    load_snapshot_schedules()
    scheduler.start()
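The `monitor` loop is equivalent to a single `any()` over the schedules; a tiny standalone sketch with `get_schedules` stubbed:

```python
# Equivalent to the monitor loop above, with get_schedules stubbed so the
# sketch runs on its own.
def get_schedules():
    return [{'if_modified': False}, {'if_modified': True}]

monitor = any(schedule['if_modified'] for schedule in get_schedules())
print(monitor)   # True: at least one schedule needs the fswatch listeners
```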

@@ -44,6 +44,7 @@ DEFAULT_CONFIG = {
    'disabled': False,
    'filesystems': ['tank'],
    'recursive': True,
    'if_modified': False,
    'tag': 'zasd',
    'trigger': IntervalTrigger(hours=12),
    'priority': 1,
