Skip to content

Commit

Permalink
wip
Browse files Browse the repository at this point in the history
  • Loading branch information
psy0rz committed Oct 6, 2024
1 parent 2f4ea79 commit d283da8
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 22 deletions.
9 changes: 5 additions & 4 deletions zfs_autobackup/ZfsAutobackup.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,9 +357,9 @@ def check_target_names(self, source_node, source_datasets, target_node):
target_datasets[target_name] = source_dataset

# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node, bookmark_tag):
def sync_datasets(self, source_node, source_datasets, target_node, bookmark_name):
"""Sync datasets, or thin-only on both sides
:type bookmark_tag: str
:type bookmark_name: str
:type target_node: ZfsNode
:type source_datasets: list of ZfsDataset
:type source_node: ZfsNode
Expand Down Expand Up @@ -421,7 +421,7 @@ def sync_datasets(self, source_node, source_datasets, target_node, bookmark_tag)
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
zfs_compressed=self.args.zfs_compressed, force=self.args.force,
guid_check=not self.args.no_guid_check, use_bookmarks=use_bookmarks,
bookmark_tag=bookmark_tag)
bookmark_name=bookmark_name)
except Exception as e:

fail_count = fail_count + 1
Expand Down Expand Up @@ -558,7 +558,8 @@ def run(self):
fail_count = self.sync_datasets(
source_node=source_node,
source_datasets=source_datasets,
target_node=target_node, bookmark_tag=target_dataset.properties['guid'])
target_node=target_node,
bookmark_name=self.args.backup_name + self.tag_seperator + target_dataset.properties['guid'])

# no target specified, run in snapshot-only mode
else:
Expand Down
48 changes: 30 additions & 18 deletions zfs_autobackup/ZfsDataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -1008,32 +1008,40 @@ def thin(self, skip_holds=False):
obsolete.destroy()
self.snapshots.remove(obsolete)

def find_common_snapshot(self, target_dataset, guid_check):
def find_common_snapshot(self, target_dataset, guid_check, bookmark_name):
        """find latest common snapshot/bookmark between us and target returns None if it's
        an initial transfer. It prefers bookmarks over snapshots on the source side. Target side will always be a snapshots.
an initial transfer.
On the source it prefers the specified bookmark_name
Args:
:rtype: ZfsDataset|None
:type guid_check: bool
:type target_dataset: ZfsDataset
            :type bookmark_name: str
"""

bookmark = self.zfs_node.get_dataset(bookmark_name)

if not target_dataset.exists or not target_dataset.snapshots:
# target has nothing yet
return None
else:
for target_snapshot in reversed(target_dataset.snapshots):

# Source bookmark?
source_bookmark = self.find_bookmark(target_snapshot)
if source_bookmark:
if guid_check and source_bookmark.properties['guid'] != target_snapshot.properties['guid']:
source_bookmark.warning("Bookmark has mismatching GUID, ignoring.")
else:
source_bookmark.debug("Common bookmark")
return source_bookmark

# Source snapshot?
# Source bookmark with same suffix?
# source_bookmark = self.find_bookmark(target_snapshot)
# if source_bookmark:
# if guid_check and source_bookmark.properties['guid'] != target_snapshot.properties['guid']:
# source_bookmark.warning("Bookmark has mismatching GUID, ignoring.")
# else:
# source_bookmark.debug("Common bookmark")
# return source_bookmark
                # XXX: actually want to keep the guid check optional; so the bookmark name becomes snapshotname_targetdatasetguid
                if bookmark.exists and bookmark.properties['guid'] == target_snapshot.properties['guid']:
                    return bookmark

# Source snapshot with same suffix?
source_snapshot = self.find_snapshot(target_snapshot)
if source_snapshot:
if guid_check and source_snapshot.properties['guid'] != target_snapshot.properties['guid']:
Expand Down Expand Up @@ -1138,7 +1146,7 @@ def _validate_resume_token(self, target_dataset, start_snapshot):
else:
return resume_token

def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw, bookmark_name):
"""Determine at what snapshot to start syncing to target_dataset and what to sync and what to keep.
Args:
Expand All @@ -1147,6 +1155,7 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
:type also_other_snapshots: bool
:type guid_check: bool
:type raw: bool
:type bookmark_name: str
Returns:
tuple: A tuple containing:
Expand All @@ -1156,11 +1165,14 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
- list[ZfsDataset]: Transfer target snapshots. These need to be transferred.
- list[ZfsDataset]: Incompatible target snapshots. Target snapshots that are in the way, after the common snapshot. (need to be destroyed to continue)
"""

### 1: determine common and start snapshot

target_dataset.debug("Determining start snapshot")
source_common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check)
source_common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check,
bookmark_name=bookmark_name)
incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(source_common_snapshot, raw)

# let thinner decide whats obsolete on source after the transfer is done
Expand Down Expand Up @@ -1239,7 +1251,7 @@ def handle_incompatible_snapshots(self, incompatible_target_snapshots, destroy_i
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check,
use_bookmarks, bookmark_tag):
use_bookmarks, bookmark_name):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
Expand All @@ -1259,7 +1271,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
:type no_send: bool
:type guid_check: bool
:type use_bookmarks: bool
:type bookmark_tag: str
:type bookmark_name: str
"""

# self.verbose("-> {}".format(target_dataset))
Expand All @@ -1282,7 +1294,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
(source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers,
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
guid_check=guid_check, raw=raw)
guid_check=guid_check, raw=raw, bookmark_name=bookmark_name)

# NOTE: we do a pre-clean because we dont want filesystems to fillup when backups keep failing.
# Also usefull with no_send to still cleanup stuff.
Expand Down Expand Up @@ -1348,7 +1360,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert

# bookmark common snapshot on source, or use holds if bookmarks are not enabled.
if use_bookmarks:
source_bookmark = source_snapshot.bookmark(bookmark_tag)
source_bookmark = source_snapshot.bookmark(bookmark_name)
# note: destroy source_snapshot when obsolete at this point?
else:
source_bookmark = None
Expand Down
1 change: 1 addition & 0 deletions zfs_autobackup/ZfsNode.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,7 @@ def get_pool(self, dataset):

def get_dataset(self, name, force_exists=None):
"""get a ZfsDataset() object from name. stores objects internally to enable caching
:type name: str
:rtype: ZfsDataset
"""

Expand Down

0 comments on commit d283da8

Please sign in to comment.