Пример использования
b/defragmenter/bpb.py similarity index 100% rename from bpb.py rename to defragmenter/bpb.py diff --git a/Cluster.py b/defragmenter/cluster.py similarity index 100% rename from Cluster.py rename to defragmenter/cluster.py diff --git a/defragmenter/cluster_manager.py b/defragmenter/cluster_manager.py new file mode 100644 index 0000000..a1f1602 --- /dev/null +++ b/defragmenter/cluster_manager.py @@ -0,0 +1,91 @@ +import struct +from pathlib import Path + +from defragmenter.cluster import Cluster +from defragmenter.directory_parser import DirectoryParser +from defragmenter.fat_reader import FatReader + +FAT_ENTRY_MASK = 0x0FFFFFFF +FAT_FREE_MASK = 0x00000000 + +class ClusterManager: + """ + Базовый класс для управления кластерами в файловой системе FAT32. + """ + def __init__(self, image_path: Path, fat_reader: FatReader, directory_parser: DirectoryParser) -> None: + self._image_path = image_path + self._fat_reader = fat_reader + self._directory_parser = directory_parser + self._bpb = fat_reader.bpb + self._free_clusters: list[int] = self._find_free_clusters() + + def find_fragmented_files(self, all_files: list[dict]) -> list[dict]: + fragmented_files = [] + for file_entry in all_files: + cluster_chain = self._fat_reader.get_cluster_chain(file_entry["starting_cluster"]) + if self._is_fragmented(cluster_chain): + fragmented_files.append({ + "path": file_entry["path"], + "cluster_chain": [cluster.index for cluster in cluster_chain] + }) + + return fragmented_files + + def _is_fragmented(self, cluster_chain: list[Cluster]) -> bool: + """ + Проверяет, является ли кластерная цепочка фрагментированной. + """ + for cluster_index in range(len(cluster_chain) - 1): + if cluster_chain[cluster_index].next_index != (cluster_chain[cluster_index].index + 1): + return True + return False + + def _write_fat(self) -> None: + """ + Записывает обновлённую FAT таблицу обратно в образ диска. 
+ """ + with open(self._image_path, 'r+b') as f: + fat_start = self._bpb.reserved_sec_cnt * self._bpb.byts_per_sec + fat_size = self._bpb.fat_size_32 * self._bpb.byts_per_sec + fat_data = bytearray() + + for cluster in self._fat_reader.clusters: + fat_entry = cluster.next_index & FAT_ENTRY_MASK + fat_data += struct.pack(" None: + """ + Копирует данные из одного кластера в другой + """ + with open(self._image_path, 'r+b') as f: + old_data = self._fat_reader.read_cluster_data(self._fat_reader.clusters[old_cluster_index]) + f.seek(self._fat_reader.get_cluster_offset(new_cluster_index)) + f.write(old_data) + + def _update_directory_entry(self, file_entry: dict, new_start_cluster_index: int) -> None: + """ + Обновляет поле starting_cluster для файла в каталоге. + """ + self._directory_parser.update_starting_cluster(file_entry['path'], new_start_cluster_index) + + def _update_fat(self, old_clusters_indices: list[int], new_clusters_indices: list[int]) -> None: + """ + Обновляет FAT таблицу: освобождает старые кластеры и связывает новые кластеры. 
+ """ + for cluster in old_clusters_indices: + self._fat_reader.clusters[cluster].next_index = FAT_FREE_MASK + + for i in range(len(new_clusters_indices) - 1): + self._fat_reader.clusters[new_clusters_indices[i]].next_index = new_clusters_indices[i + 1] + self._fat_reader.clusters[new_clusters_indices[-1]].next_index = FAT_ENTRY_MASK diff --git a/defragmenter.py b/defragmenter/defragmenter.py similarity index 53% rename from defragmenter.py rename to defragmenter/defragmenter.py index 11c18bf..75f44dc 100644 --- a/defragmenter.py +++ b/defragmenter/defragmenter.py @@ -1,26 +1,20 @@ -import struct from pathlib import Path -from cluster import Cluster -from directory_parser import DirectoryParser -from fat_reader import FatReader +from defragmenter.cluster_manager import ClusterManager +from defragmenter.directory_parser import DirectoryParser +from defragmenter.fat_reader import FatReader -FAT_END_MASK = 0x0FFFFFF8 FAT_ENTRY_MASK = 0x0FFFFFFF FAT_FREE_MASK = 0x00000000 ClusterIndexList = list[int] -class Defragmenter: +class Defragmenter(ClusterManager): """ Класс для дефрагментации файловой системы FAT32. """ def __init__(self, image_path: Path, fat_reader: FatReader, directory_parser: DirectoryParser) -> None: - self._image_path = image_path - self._fat_reader = fat_reader - self._directory_parser = directory_parser - self._bpb = fat_reader.bpb - self._free_clusters: list[int] = self._find_free_clusters() + super().__init__(image_path, fat_reader, directory_parser) def defragment(self) -> None: """ @@ -49,21 +43,6 @@ def defragment(self) -> None: self._write_fat() print("Дефрагментация завершена успешно.") - def _is_fragmented(self, cluster_chain: list[Cluster]) -> bool: - """ - Проверяет, является ли кластерная цепочка фрагментированной. 
- """ - for cluster_index in range(len(cluster_chain) - 1): - if cluster_chain[cluster_index].next_index != (cluster_chain[cluster_index].index + 1): - return True - return False - - def _find_free_clusters(self): - """ - Находит все свободные кластеры. - """ - return [cluster.index for cluster in self._fat_reader.clusters if cluster.next_index == FAT_FREE_MASK] - def _find_free_blocks(self) -> list[ClusterIndexList]: """ Находит все непрерывные блоки свободных кластеров. @@ -116,50 +95,4 @@ def _allocate_clusters(self, clusters_count: int) -> list[int]: self._free_clusters.remove(cluster) return new_clusters - def _copy_cluster_data(self, old_cluster_index: int, new_cluster_index: int) -> None: - """ - Копирует данные из одного кластера в другой - """ - with open(self._image_path, 'r+b') as f: - old_data = self._fat_reader.read_cluster_data(self._fat_reader.clusters[old_cluster_index]) - f.seek(self._fat_reader.get_cluster_offset(new_cluster_index)) - f.write(old_data) - - def _update_fat(self, old_clusters_indices: list[int], new_clusters_indices: list[int]) -> None: - """ - Обновляет FAT таблицу: освобождает старые кластеры и связывает новые кластеры. - """ - for cluster in old_clusters_indices: - self._fat_reader.clusters[cluster].next_index = FAT_FREE_MASK - - for i in range(len(new_clusters_indices)): - current_cluster = new_clusters_indices[i] - if i < len(new_clusters_indices) - 1: - self._fat_reader.clusters[current_cluster].next_index = new_clusters_indices[i + 1] - else: - self._fat_reader.clusters[current_cluster].next_index = FAT_ENTRY_MASK - print(f"FAT таблица обновлена для новых кластеров: {new_clusters_indices}") - - def _update_directory_entry(self, file_entry: dict, new_start_cluster_index: int) -> None: - """ - Обновляет поле starting_cluster для файла в каталоге. 
- """ - self._directory_parser.update_starting_cluster(file_entry['path'], new_start_cluster_index) - - def _write_fat(self) -> None: - """ - Записывает обновлённую FAT таблицу обратно в образ диска. - """ - with open(self._image_path, 'r+b') as f: - fat_start = self._bpb.reserved_sec_cnt * self._bpb.byts_per_sec - fat_size = self._bpb.fat_size_32 * self._bpb.byts_per_sec - fat_data = bytearray() - - for cluster in self._fat_reader.clusters: - fat_entry = cluster.next_index & FAT_ENTRY_MASK - fat_data += struct.pack(" None: + super().__init__(image_path, fat_reader, directory_parser) + + def fragment_file(self, file_path: Path) -> None: + """ + Фрагментирует указанный файл, разбивая его на несмежные кластеры. + """ + all_files = self._directory_parser.get_all_files(self._bpb.root_clus) + target_file = next((f for f in all_files if Path(f["path"]) == file_path), None) + if not target_file: + raise FileNotFoundError(f"Файл {file_path} не найден") + + cluster_chain = self._fat_reader.get_cluster_chain(target_file["starting_cluster"]) + cluster_indices = [cluster.index for cluster in cluster_chain] + + if len(cluster_indices) < 2: + raise ValueError(f"Файл '{file_path}' слишком мал для фрагментации.") + + print(f"Фрагментируем файл '{file_path}'") + + new_clusters = [] + for old_cluster_index in cluster_indices: + if not self._free_clusters: + print("Нет свободных кластеров для фрагментации.") + break + index = random.randrange(len(self._free_clusters)) + new_cluster = self._free_clusters.pop(index) + self._copy_cluster_data(old_cluster_index, new_cluster) + new_clusters.append(new_cluster) + + self._update_directory_entry(target_file, new_clusters[0]) + self._update_fat(cluster_indices, new_clusters) + self._write_fat() + print(f"Файл '{file_path}' успешно фрагментирован.") diff --git a/main.py b/main.py index 9e019b9..35201b9 100644 --- a/main.py +++ b/main.py @@ -2,22 +2,59 @@ import argparse from pathlib import Path -from bpb import BPB -from directory_parser 
import DirectoryParser -from fat_reader import FatReader -from defragmenter import Defragmenter +from defragmenter.bpb import BPB +from defragmenter.directory_parser import DirectoryParser +from defragmenter.fat_reader import FatReader +from defragmenter.defragmenter import Defragmenter +from defragmenter.fragmenter import Fragmenter +from defragmenter.cluster_manager import ClusterManager arg_parser = argparse.ArgumentParser() -arg_parser.add_argument("image_path", type=str) +subparsers = arg_parser.add_subparsers(dest='command', help='Доступные команды', required=True) + +defrag_parser = subparsers.add_parser('defragment') +defrag_parser.add_argument("image_path", type=str, help="Путь к образу файловой системы FAT32") + +frag_parser = subparsers.add_parser('fragment') +frag_parser.add_argument("image_path", type=str, help="Путь к образу файловой системы FAT32") +frag_parser.add_argument("file_path", type=str, help="Путь к файлу для фрагментации") + +check_parser = subparsers.add_parser('check', help="Просмотреть фрагментированные файлы") +check_parser.add_argument("image_path", type=str, help="Путь к образу файловой системы FAT32") + if __name__ == "__main__": args = arg_parser.parse_args() + command = args.command + image_path = Path(args.image_path) - final_image_path = image_path.with_name(f"{image_path.name}_defragmented") + final_image_path = image_path.with_name(f"{image_path.name}_{command}ed") + + if command == "check": + final_image_path = image_path + else: + shutil.copyfile(image_path, final_image_path) - shutil.copyfile(image_path, final_image_path) bpb = BPB(final_image_path) fat_reader = FatReader(final_image_path, bpb) parser = DirectoryParser(fat_reader) - defragmenter = Defragmenter(image_path, fat_reader, parser) - defragmenter.defragment() + + if command == "check": + all_files = parser.get_all_files(bpb.root_clus) + fragmented_files = ClusterManager(image_path, fat_reader, parser).find_fragmented_files(all_files) + print("\nВсе файлы:") + for 
file in all_files: + print(f"path: {file['path']}, starting_cluster: {file['starting_cluster']}, size: {file['size']}") + + print("\nФрагментированные файлы:") + for file in fragmented_files: + print(f"path: {file['path']}, cluster_chain: {file['cluster_chain']}") + + elif command == "defragment": + defragmenter = Defragmenter(final_image_path, fat_reader, parser) + defragmenter.defragment() + + elif command == "fragment": + file_path = Path(args.file_path) + fragmenter = Fragmenter(final_image_path, fat_reader, parser) + fragmenter.fragment_file(Path(args.file_path)) diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..76501e0 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +minversion = 6.0 +addopts = --cov=defragmenter --cov-report=term-missing +testpaths = tests diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..ac96e08 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +colorama==0.4.6 +coverage==7.6.10 +iniconfig==2.0.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-cov==6.0.0 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..b774c52 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,34 @@ +import pytest +from unittest.mock import MagicMock + +@pytest.fixture +def mock_fat_reader(): + fat_reader = MagicMock() + fat_reader.bpb = MagicMock() + fat_reader.bpb.reserved_sec_cnt = 32 + fat_reader.bpb.byts_per_sec = 512 + fat_reader.bpb.fat_size_32 = 256 + fat_reader.bpb.root_clus = 2 + fat_reader.bpb.sec_per_clus = 8 + fat_reader.bpb.total_sec_32 = 100000 + fat_reader.cluster_size = 4096 + fat_reader.clusters = [] + for _ in range(1000): + fat_reader.clusters.append(MagicMock()) + for i, cluster in enumerate(fat_reader.clusters): + cluster.index = i + cluster.next_index = i + 1 if i <= 500 else i + 2 if i < 998 else 0x0FFFFFFF + 
fat_reader.get_cluster_offset.side_effect = lambda x: x * fat_reader.cluster_size + #fat_reader.read_cluster_data.side_effect = lambda cluster: b'Data' * 1024 + fat_reader.write_fat.return_value = None + return fat_reader + +@pytest.fixture +def mock_directory_parser(): + directory_parser = MagicMock() + directory_parser.get_all_files.return_value = [ + {"path": "DIR1/FILE1.TXT", "starting_cluster": 2, "size": 2048}, + {"path": "DIR2/FILE2.TXT", "starting_cluster": 10, "size": 4096}, + ] + directory_parser.update_starting_cluster.return_value = None + return directory_parser diff --git a/tests/test_bpb.py b/tests/test_bpb.py new file mode 100644 index 0000000..a9ac61e --- /dev/null +++ b/tests/test_bpb.py @@ -0,0 +1,16 @@ +from pathlib import Path + +from defragmenter.bpb import BPB + +def test_init(): + project_root = Path(__file__).resolve().parent.parent + image_path = project_root / "Images" / "FAT_32_32MB" + assert image_path.exists(), f"Файл {image_path} не найден." + bpb = BPB(image_path) + assert bpb.root_clus == 2 + assert bpb.byts_per_sec == 512 + assert bpb.num_fats == 2 + assert bpb.reserved_sec_cnt == 2782 + assert bpb.total_sec_32 == 65536 + assert bpb.fat_size_32 == 14993 + assert bpb.sec_per_clus == 32 \ No newline at end of file diff --git a/tests/test_cluster.py b/tests/test_cluster.py new file mode 100644 index 0000000..c5fc3fd --- /dev/null +++ b/tests/test_cluster.py @@ -0,0 +1,7 @@ +from defragmenter.cluster import Cluster + +def test_is_valid(): + valid_index_cluster = Cluster(100, 101, False) + assert valid_index_cluster.is_valid() + invalid_index_cluster = Cluster(1, 2, False) + assert not invalid_index_cluster.is_valid() diff --git a/tests/test_cluster_manager.py b/tests/test_cluster_manager.py new file mode 100644 index 0000000..f33c367 --- /dev/null +++ b/tests/test_cluster_manager.py @@ -0,0 +1,54 @@ +from pathlib import Path + +from defragmenter.bpb import BPB +from defragmenter.cluster_manager import ClusterManager +from 
defragmenter.directory_parser import DirectoryParser +from defragmenter.fat_reader import FatReader + + +def test_is_fragmented_not_fragmented(mock_fat_reader, mock_directory_parser): + manager = ClusterManager(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + cluster_chain = [mock_fat_reader.clusters[i] for i in range(2, 7)] + for i, cluster in enumerate(cluster_chain): + if i < len(cluster_chain) - 1: + cluster.next_index = cluster.index + 1 + else: + cluster.next_index = 0x0FFFFFFF + assert not manager._is_fragmented(cluster_chain) + +def test_is_fragmented_fragmented(mock_fat_reader, mock_directory_parser): + manager = ClusterManager(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + cluster_chain = [mock_fat_reader.clusters[i] for i in range(2, 7)] + cluster_chain[2].next_index = 20 + assert manager._is_fragmented(cluster_chain) + +def test_update_directory_entry_success(mock_fat_reader, mock_directory_parser): + manager = ClusterManager(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + file_entry = {"path": "DIR1/FILE1.TXT"} + manager._update_directory_entry(file_entry, 5) + mock_directory_parser.update_starting_cluster.assert_called_with("DIR1/FILE1.TXT", 5) + +def test_find_fragmented_files(): + project_root = Path(__file__).resolve().parent.parent + image_path = project_root / "Images" / "FAT_32_fragmented" + bpb = BPB(image_path) + fat_reader = FatReader(image_path, bpb) + directory_parser = DirectoryParser(fat_reader) + all_files = directory_parser.get_all_files(bpb.root_clus) + manager = ClusterManager(image_path, fat_reader, directory_parser) + fragmented_files = manager.find_fragmented_files(all_files) + expected_fragmented_files = [ + { + 'path': 'ASDA.TXT', + 'cluster_chain': [6, 1552, 1553, 1554, 1555, 1556] + }, + { + 'path': 'ipset-discord.txt', + 'cluster_chain': [1337, 1557, 1558, 1559, 1560, 1561] + }, + { + 'path': 'list-discord.txt', + 'cluster_chain': [1546, 1742] + } + ] + assert 
expected_fragmented_files == fragmented_files diff --git a/tests/test_defragmenter.py b/tests/test_defragmenter.py new file mode 100644 index 0000000..837d494 --- /dev/null +++ b/tests/test_defragmenter.py @@ -0,0 +1,63 @@ +import pytest +from unittest.mock import MagicMock, patch +from pathlib import Path + +from defragmenter.defragmenter import Defragmenter + +def test_defragment_no_fragmented_files(mock_fat_reader, mock_directory_parser): + defragmenter = Defragmenter(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + defragmenter.logger = MagicMock() + + mock_directory_parser.get_all_files.return_value = [ + {"path": "DIR1/FILE1.TXT", "starting_cluster": 2, "size": 4096}, + ] + cluster_chain = [mock_fat_reader.clusters[i] for i in range(2, 7)] + for i, cluster in enumerate(cluster_chain): + if i < len(cluster_chain) - 1: + cluster.next_index = cluster.index + 1 + else: + cluster.next_index = 0x0FFFFFFF + defragmenter._copy_cluster_data = MagicMock() + with patch('defragmenter.defragmenter.ClusterManager._write_fat', side_effect=None): + defragmenter.defragment() + defragmenter._copy_cluster_data.assert_not_called() + +def test_defragment_with_fragmented_files(mock_fat_reader, mock_directory_parser): + defragmenter = Defragmenter(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + defragmenter.logger = MagicMock() + mock_directory_parser.get_all_files.return_value = [ + {"path": "DIR1/FILE1.TXT", "starting_cluster": 567, "size": 4096}, + ] + cluster_chain = [mock_fat_reader.clusters[i] for i in range(2, 7)] + cluster_chain[2].next_index = 20 + mock_fat_reader.get_cluster_chain = MagicMock() + mock_fat_reader.get_cluster_chain.return_value = cluster_chain + defragmenter._copy_cluster_data = MagicMock() + defragmenter._update_fat = MagicMock() + defragmenter._update_directory_entry = MagicMock() + defragmenter._allocate_clusters = MagicMock() + defragmenter._allocate_clusters.return_value = [i for i in range(500, 505)] + + with 
patch('defragmenter.defragmenter.ClusterManager._write_fat', side_effect=None): + defragmenter.defragment() + + defragmenter._copy_cluster_data.assert_called() + defragmenter._update_fat.assert_called() + defragmenter._update_directory_entry.assert_called_with(mock_directory_parser.get_all_files.return_value[0], + defragmenter._allocate_clusters.return_value[0]) + +def test_defragment_write_fat_failure(mock_fat_reader, mock_directory_parser): + defragmenter = Defragmenter(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + mock_directory_parser.get_all_files.return_value = [ + {"path": "DIR1/FILE1.TXT", "starting_cluster": 2, "size": 4096}, + ] + cluster_chain = [mock_fat_reader.clusters[i] for i in range(2, 7)] + cluster_chain[2].next_index = 20 + with pytest.raises(Exception): + with patch('defragmenter.defragmenter.ClusterManager._write_fat', side_effect=Exception("Write FAT failed")): + defragmenter.defragment() + +def test_find_free_blocks(mock_fat_reader, mock_directory_parser): + defragmenter = Defragmenter(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + defragmenter._free_clusters = MagicMock().return_value = [2, 3, 4, 7, 8, 10, 11, 12, 13] + assert defragmenter._find_free_blocks() == [[2, 3, 4], [7, 8], [10, 11, 12, 13]] \ No newline at end of file diff --git a/tests/test_directory_parser.py b/tests/test_directory_parser.py new file mode 100644 index 0000000..368407d --- /dev/null +++ b/tests/test_directory_parser.py @@ -0,0 +1,104 @@ +from pathlib import Path + +import pytest + +from defragmenter.fat_reader import FatReader +from defragmenter.directory_parser import DirectoryParser +from defragmenter.bpb import BPB + + +@pytest.fixture +def directory_parser(): + project_root = Path(__file__).resolve().parent.parent + image_path = project_root / "Images" / "FAT_32_32MB" + assert image_path.exists(), f"Файл {image_path} не найден." 
+ fat_reader = FatReader(image_path, BPB(image_path)) + return DirectoryParser(fat_reader) + +def test_init(directory_parser): + assert directory_parser is not None + + +def test_parse_directory_entries(directory_parser): + first_cluster = directory_parser.fat_reader.clusters[directory_parser.fat_reader.bpb.root_clus] + cluster_data = directory_parser.fat_reader.read_cluster_data(first_cluster) + + entries = directory_parser.parse_directory_entries(cluster_data) + + assert isinstance(entries, list) + + expected_file = { + 'attributes': 32, + 'name': 'ASDA.TXT', + 'size': 1644, + 'starting_cluster': 6 + } + + assert expected_file in entries, f"{expected_file} не найден в записях каталога." + + +def test_get_all_files(directory_parser): + all_files = directory_parser.get_all_files(directory_parser.fat_reader.bpb.root_clus) + assert isinstance(all_files, list) + expected_files = [ + { + 'path': 'ASDA.TXT', + 'size': 1644, + 'starting_cluster': 6 + }, + { + 'path': 'PortScan/Parser.py', + 'size': 2986, + 'starting_cluster': 9 + }, + { + 'path': 'PortScan/__pycache__/TCP_Scanner.cpython-311.pyc', + 'starting_cluster': 25, + 'size': 8589 + } + ] + + for file in expected_files: + assert file in all_files, f"Файл {file} не найден в списке всех файлов." + + +def test_find_directory_entry_existing_file(directory_parser): + target_name = "ASDA.TXT" + root_cluster = directory_parser.fat_reader.bpb.root_clus + result = directory_parser.find_directory_entry(root_cluster, target_name) + assert result is not None, f"Запись для {target_name} не найдена." + entry_offset, cluster_index = result + assert isinstance(entry_offset, int) + assert isinstance(cluster_index, int) + +def test_find_directory_entry_nonexistent_file(directory_parser): + target_name = "NONEXISTENT.TXT" + root_cluster = directory_parser.fat_reader.bpb.root_clus + result = directory_parser.find_directory_entry(root_cluster, target_name) + assert result is None, f"Запись для {target_name} должна отсутствовать." 
+ +def test_navigate_path_valid_path(directory_parser): + path = ['PortScan', '__pycache__', 'TCP_Scanner.cpython-311.pyc'] + path_parts = path + result = directory_parser.navigate_path(path_parts) + assert isinstance(result, int) + assert result >= 2 + +def test_navigate_path_invalid_path(directory_parser): + path = ["nonexistent_dir", "file.txt"] + path_parts = path + result = directory_parser.navigate_path(path_parts) + assert result is None, "Путь должен отсутствовать." + +def test_find_subdirectory_cluster_existing(directory_parser): + subdir_name = "PortScan" + root_cluster = directory_parser.fat_reader.bpb.root_clus + result = directory_parser.find_subdirectory_cluster(root_cluster, subdir_name) + assert isinstance(result, int), "Кластер подкаталога должен быть целым числом." + assert result >= 2, "Кластер должен быть валидным (>=2)." + +def test_find_subdirectory_cluster_nonexistent(directory_parser): + subdir_name = "ghost_dir" + root_cluster = directory_parser.fat_reader.bpb.root_clus + result = directory_parser.find_subdirectory_cluster(root_cluster, subdir_name) + assert result is None, "Кластер подкаталога не должен существовать." 
diff --git a/tests/test_fragmenter.py b/tests/test_fragmenter.py new file mode 100644 index 0000000..19a3792 --- /dev/null +++ b/tests/test_fragmenter.py @@ -0,0 +1,56 @@ +import pytest +from unittest.mock import MagicMock, patch +from pathlib import Path + +from defragmenter.fragmenter import Fragmenter + +def test_fragment_file_success(mock_fat_reader, mock_directory_parser): + fragmenter = Fragmenter(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + fragmenter.logger = MagicMock() + fragmenter._write_fat = MagicMock() + fragmenter._update_fat = MagicMock() + fragmenter._update_directory_entry = MagicMock() + fragmenter._copy_cluster_data = MagicMock() + fragmenter._free_clusters = MagicMock().return_value = [i for i in range(1000)] + + mock_directory_parser.get_all_files.return_value = [ + {"path": "DIR1/FILE1.TXT", "starting_cluster": 2, "size": 4096} + ] + target_file = {"path": "DIR1/FILE1.TXT", "starting_cluster": 2, "size": 4096} + mock_fat_reader.get_cluster_chain.side_effect = lambda start: mock_fat_reader.clusters[start:start+4] + + with patch('random.randrange', return_value=100): + fragmenter.fragment_file(Path("DIR1/FILE1.TXT")) + + fragmenter._copy_cluster_data.assert_called() + fragmenter._update_fat.assert_called() + fragmenter._update_directory_entry.assert_called_with(target_file, 100) + +def test_fragment_file_not_found(mock_fat_reader, mock_directory_parser): + fragmenter = Fragmenter(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + fragmenter.logger = MagicMock() + mock_directory_parser.get_all_files.return_value = [] + with pytest.raises(FileNotFoundError): + fragmenter.fragment_file(Path("DIR1/FILE1.TXT")) + + +def test_fragment_file_small_file(mock_fat_reader, mock_directory_parser): + fragmenter = Fragmenter(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + fragmenter.logger = MagicMock() + mock_directory_parser.get_all_files.return_value = [ + {"path": "DIR1/SMALL.TXT", 
"starting_cluster": 2, "size": 512} # Размер меньше 2 кластеров + ] + mock_fat_reader.get_cluster_chain.side_effect = lambda start: mock_fat_reader.clusters[start] + with pytest.raises(ValueError): + fragmenter.fragment_file(Path("DIR1/SMALL.TXT")) + +def test_fragment_file_copy_failure(mock_fat_reader, mock_directory_parser): + fragmenter = Fragmenter(Path("Images/FAT_32_32MB"), mock_fat_reader, mock_directory_parser) + fragmenter.logger = MagicMock() + fragmenter._copy_cluster_data = MagicMock(side_effect=Exception("Copy failed")) + mock_directory_parser.get_all_files.return_value = [ + {"path": "DIR1/FILE1.TXT", "starting_cluster": 600, "size": 4096} + ] + mock_fat_reader.get_cluster_chain.side_effect = lambda start: mock_fat_reader.clusters[start:start+2] + with pytest.raises(Exception): + fragmenter.fragment_file(Path("DIR1/FILE1.TXT"))