diff --git a/tests/test_zfsautobackup34.py b/tests/test_zfsautobackup34.py index 1ac9f02..690f410 100644 --- a/tests/test_zfsautobackup34.py +++ b/tests/test_zfsautobackup34.py @@ -10,52 +10,54 @@ def setUp(self): def test_select_bookmark_or_snapshot(self): """test if zfs autobackup chooses the most recent common matching dataset when there are both bookmarks and snapshots, some with the wrong GUID""" - with mocktime("20101111000000"): - self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --no-holds".split(" ")).run()) - - with mocktime("20101111000001"): self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --no-holds".split(" ")).run()) - #OOK VERKEERDE GUID + shelltest("zfs destroy test_source2/fs2/sub@test-20101111000001") shelltest("zfs destroy test_source1/fs1/sub#test-20101111000001") + #bookmark with incorrect GUID, should fallback to snapshot + shelltest("zfs destroy test_source1/fs1#test-20101111000001") + shelltest("zfs snapshot test_source1/fs1@wrong") + shelltest("zfs bookmark test_source1/fs1@wrong \#test-20101111000001") + shelltest("zfs destroy test_source1/fs1@wrong") with mocktime("20101111000002"): - self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --no-holds --debug".split(" ")).run()) + self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --no-holds".split(" ")).run()) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) self.assertMultiLineEqual(r,""" test_source1 test_source1/fs1 -test_source1/fs1@test-20101111000000 -test_source1/fs1@invalid test_source1/fs1@test-20101111000001 +test_source1/fs1@test-20101111000002 +test_source1/fs1#test-20101111000001 +test_source1/fs1#test-20101111000002 test_source1/fs1/sub -test_source1/fs1/sub@test-20101111000000 test_source1/fs1/sub@test-20101111000001 +test_source1/fs1/sub@test-20101111000002 +test_source1/fs1/sub#test-20101111000002 test_source2 
test_source2/fs2 test_source2/fs2/sub -test_source2/fs2/sub@test-20101111000000 -test_source2/fs2/sub@test-20101111000001 +test_source2/fs2/sub@test-20101111000002 +test_source2/fs2/sub#test-20101111000002 test_source2/fs3 test_source2/fs3/sub test_target1 test_target1/test_source1 test_target1/test_source1/fs1 -test_target1/test_source1/fs1@test-20101111000000 -test_target1/test_source1/fs1@invalid test_target1/test_source1/fs1@test-20101111000001 +test_target1/test_source1/fs1@test-20101111000002 test_target1/test_source1/fs1/sub -test_target1/test_source1/fs1/sub@test-20101111000000 test_target1/test_source1/fs1/sub@test-20101111000001 +test_target1/test_source1/fs1/sub@test-20101111000002 test_target1/test_source2 test_target1/test_source2/fs2 test_target1/test_source2/fs2/sub -test_target1/test_source2/fs2/sub@test-20101111000000 test_target1/test_source2/fs2/sub@test-20101111000001 +test_target1/test_source2/fs2/sub@test-20101111000002 """) diff --git a/zfs_autobackup/ZfsDataset.py b/zfs_autobackup/ZfsDataset.py index f87cf0d..c55397b 100644 --- a/zfs_autobackup/ZfsDataset.py +++ b/zfs_autobackup/ZfsDataset.py @@ -59,11 +59,11 @@ def __str__(self): return self.name - def __eq__(self, obj): - if not isinstance(obj, ZfsDataset): + def __eq__(self, dataset): + if not isinstance(dataset, ZfsDataset): return False - return self.name == obj.name + return self.name == dataset.name def verbose(self, txt): """ @@ -1089,7 +1089,7 @@ def _pre_clean(self, source_common_snapshot, target_dataset, source_obsoletes, t :type target_transfers: list[ZfsDataset] """ - # on source: delete all obsoletes that are not in target_transfers (except common snapshot) + # on source: delete all obsoletes that are not in target_transfers (except common snapshot, if its not a bookmark) for source_snapshot in self.snapshots: if (source_snapshot in source_obsoletes and source_common_snapshot != source_snapshot @@ -1152,10 +1152,10 @@ def _plan_sync(self, target_dataset, also_other_snapshots, 
guid_check, raw): source_common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check) incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(source_common_snapshot, raw) - # let thinner decide whats obsolete on source after the transfer is done, keeping the last snapshot as common. + # let thinner decide what's obsolete on source after the transfer is done source_obsoletes = [] if self.our_snapshots: - source_obsoletes = self.thin_list(keeps=[self.our_snapshots[-1]])[1] + source_obsoletes = self.thin_list()[1] ### 2: Determine possible target snapshots @@ -1180,11 +1180,12 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw): if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_excluded: # create virtual target snapshot target_snapshot = target_dataset.zfs_node.get_dataset( - target_dataset.filesystem_name + "@" + source_snapshot.suffix, force_exists=False) + target_dataset.filesystem_name + source_snapshot.typed_suffix, force_exists=False) possible_target_snapshots.append(target_snapshot) source_snapshot = self.find_next_snapshot(source_snapshot) - ### 3: Let the thinner decide what it wants by looking at all the possible target_snaphots at once + ### 3: Let the thinner decide what it wants by looking at all the possible target_snapshots at once. + # always keep the last target snapshot as common snapshot. if possible_target_snapshots: (target_keeps, target_obsoletes) = target_dataset.zfs_node.thin_list(possible_target_snapshots, keep_snapshots=[