From d283da88751ae58918f0ed5b374bca388ecc7d26 Mon Sep 17 00:00:00 2001 From: Edwin Eefting Date: Sun, 6 Oct 2024 11:19:03 +0200 Subject: [PATCH] wip --- zfs_autobackup/ZfsAutobackup.py | 9 ++++--- zfs_autobackup/ZfsDataset.py | 48 ++++++++++++++++++++------------- zfs_autobackup/ZfsNode.py | 1 + 3 files changed, 36 insertions(+), 22 deletions(-) diff --git a/zfs_autobackup/ZfsAutobackup.py b/zfs_autobackup/ZfsAutobackup.py index 234dc37..dbafadc 100644 --- a/zfs_autobackup/ZfsAutobackup.py +++ b/zfs_autobackup/ZfsAutobackup.py @@ -357,9 +357,9 @@ def check_target_names(self, source_node, source_datasets, target_node): target_datasets[target_name] = source_dataset # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters: - def sync_datasets(self, source_node, source_datasets, target_node, bookmark_tag): + def sync_datasets(self, source_node, source_datasets, target_node, bookmark_name): """Sync datasets, or thin-only on both sides - :type bookmark_tag: str + :type bookmark_name: str :type target_node: ZfsNode :type source_datasets: list of ZfsDataset :type source_node: ZfsNode @@ -421,7 +421,7 @@ def sync_datasets(self, source_node, source_datasets, target_node, bookmark_tag) decrypt=self.args.decrypt, encrypt=self.args.encrypt, zfs_compressed=self.args.zfs_compressed, force=self.args.force, guid_check=not self.args.no_guid_check, use_bookmarks=use_bookmarks, - bookmark_tag=bookmark_tag) + bookmark_name=bookmark_name) except Exception as e: fail_count = fail_count + 1 @@ -558,7 +558,8 @@ def run(self): fail_count = self.sync_datasets( source_node=source_node, source_datasets=source_datasets, - target_node=target_node, bookmark_tag=target_dataset.properties['guid']) + target_node=target_node, + bookmark_name=self.args.backup_name + self.tag_seperator + target_dataset.properties['guid']) # no target specified, run in snapshot-only mode else: diff --git a/zfs_autobackup/ZfsDataset.py b/zfs_autobackup/ZfsDataset.py index 
3c307ff..51f6a37 100644 --- a/zfs_autobackup/ZfsDataset.py +++ b/zfs_autobackup/ZfsDataset.py @@ -1008,32 +1008,40 @@ def thin(self, skip_holds=False): obsolete.destroy() self.snapshots.remove(obsolete) - def find_common_snapshot(self, target_dataset, guid_check): + def find_common_snapshot(self, target_dataset, guid_check, bookmark_name): """find latest common snapshot/bookmark between us and target returns None if its - an initial transfer. It preffers bookmarks over snapshots on the source side. Target side will always be a snapshots. + an initial transfer. + + On the source it prefers the specified bookmark_name + Args: :rtype: ZfsDataset|None :type guid_check: bool :type target_dataset: ZfsDataset + :type bookmark_name: str """ + bookmark = self.zfs_node.get_dataset(bookmark_name) + if not target_dataset.exists or not target_dataset.snapshots: # target has nothing yet return None else: for target_snapshot in reversed(target_dataset.snapshots): - # Source bookmark? - source_bookmark = self.find_bookmark(target_snapshot) - if source_bookmark: - if guid_check and source_bookmark.properties['guid'] != target_snapshot.properties['guid']: - source_bookmark.warning("Bookmark has mismatching GUID, ignoring.") - else: - source_bookmark.debug("Common bookmark") - return source_bookmark - - # Source snapshot? + # Source bookmark with same suffix? + # source_bookmark = self.find_bookmark(target_snapshot) + # if source_bookmark: + # if guid_check and source_bookmark.properties['guid'] != target_snapshot.properties['guid']: + # source_bookmark.warning("Bookmark has mismatching GUID, ignoring.") + # else: + # source_bookmark.debug("Common bookmark") + # return source_bookmark + if bookmark.exists and bookmark.properties['guid'] == target_snapshot.properties['guid']:  # XXX: want to keep the guid check optional, so the bookmark name becomes snapshotname_targetdatasetguid + return bookmark + + # Source snapshot with same suffix? 
source_snapshot = self.find_snapshot(target_snapshot) if source_snapshot: if guid_check and source_snapshot.properties['guid'] != target_snapshot.properties['guid']: @@ -1138,7 +1146,7 @@ def _validate_resume_token(self, target_dataset, start_snapshot): else: return resume_token - def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw): + def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw, bookmark_name): """Determine at what snapshot to start syncing to target_dataset and what to sync and what to keep. Args: @@ -1147,6 +1155,7 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw): :type also_other_snapshots: bool :type guid_check: bool :type raw: bool + :type bookmark_name: str Returns: tuple: A tuple containing: @@ -1156,11 +1165,14 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw): - list[ZfsDataset]: Transfer target snapshots. These need to be transferred. - list[ZfsDataset]: Incompatible target snapshots. Target snapshots that are in the way, after the common snapshot. 
(need to be destroyed to continue) + """ ### 1: determine common and start snapshot + target_dataset.debug("Determining start snapshot") - source_common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check) + source_common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check, + bookmark_name=bookmark_name) incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(source_common_snapshot, raw) # let thinner decide whats obsolete on source after the transfer is done @@ -1239,7 +1251,7 @@ def handle_incompatible_snapshots(self, incompatible_target_snapshots, destroy_i def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties, ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots, no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check, - use_bookmarks, bookmark_tag): + use_bookmarks, bookmark_name): """sync this dataset's snapshots to target_dataset, while also thinning out old snapshots along the way. @@ -1259,7 +1271,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert :type no_send: bool :type guid_check: bool :type use_bookmarks: bool - :type bookmark_tag: str + :type bookmark_name: str """ # self.verbose("-> {}".format(target_dataset)) @@ -1282,7 +1294,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert (source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers, incompatible_target_snapshots) = \ self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots, - guid_check=guid_check, raw=raw) + guid_check=guid_check, raw=raw, bookmark_name=bookmark_name) # NOTE: we do a pre-clean because we dont want filesystems to fillup when backups keep failing. # Also usefull with no_send to still cleanup stuff. 
@@ -1348,7 +1360,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert # bookmark common snapshot on source, or use holds if bookmarks are not enabled. if use_bookmarks: - source_bookmark = source_snapshot.bookmark(bookmark_tag) + source_bookmark = source_snapshot.bookmark(bookmark_name) # note: destroy source_snapshot when obsolete at this point? else: source_bookmark = None diff --git a/zfs_autobackup/ZfsNode.py b/zfs_autobackup/ZfsNode.py index be9b8b2..7d91c68 100644 --- a/zfs_autobackup/ZfsNode.py +++ b/zfs_autobackup/ZfsNode.py @@ -129,6 +129,7 @@ def get_pool(self, dataset): def get_dataset(self, name, force_exists=None): """get a ZfsDataset() object from name. stores objects internally to enable caching + :type name: str :rtype: ZfsDataset """