@@ -83,9 +83,9 @@ join(RecentBI) ->
 add_tip_block(BlockTXPairs, RecentBI) ->
     gen_server:cast(ar_data_sync_default, {add_tip_block, BlockTXPairs, RecentBI}).
 
-invalidate_bad_data_record(Byte, AbsoluteEndOffset, StoreID, Case) ->
+invalidate_bad_data_record(AbsoluteEndOffset, ChunkSize, StoreID, Case) ->
     gen_server:cast(name(StoreID), {invalidate_bad_data_record,
-        {Byte, AbsoluteEndOffset, StoreID, Case}}).
+        {AbsoluteEndOffset, ChunkSize, StoreID, Case}}).
 
 %% @doc The condition which is true if the chunk is too small compared to the proof.
 %% Small chunks make syncing slower and increase space amplification. A small chunk
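The public invalidate_bad_data_record/4 above now identifies the chunk by its absolute end offset and exact size instead of a raw byte offset. A minimal sketch of the receiving side, assuming a standard gen_server handle_cast clause (the actual handler clause is outside this diff); it simply forwards the tuple to the internal invalidate_bad_data_record/1 changed further down:

%% Sketch only: forward the new {AbsoluteEndOffset, ChunkSize, StoreID, Case} payload
%% unchanged to the internal one-argument invalidate_bad_data_record/1.
handle_cast({invalidate_bad_data_record, Args}, State) ->
    invalidate_bad_data_record(Args),
    {noreply, State};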
@@ -1706,8 +1706,8 @@ get_chunk(Offset, SeekOffset, Pack, Packing, StoredPacking, StoreID, RequestOrig
                 {store_id, StoreID},
                 {expected_chunk_id, ar_util:encode(ChunkID)},
                 {chunk_id, ar_util:encode(ComputedChunkID)}]),
-            invalidate_bad_data_record({AbsoluteOffset - ChunkSize,
-                    AbsoluteOffset, StoreID, get_chunk_invalid_id}),
+            invalidate_bad_data_record({AbsoluteOffset, ChunkSize,
+                    StoreID, get_chunk_invalid_id}),
             {error, chunk_not_found}
         end
     end
@@ -1792,7 +1792,7 @@ read_chunk_with_metadata(
                 {modules_covering_seek_offset, ModuleIDs},
                 {chunk_data_key, ar_util:encode(ChunkDataKey)},
                 {read_fun, ReadFun}]),
-            invalidate_bad_data_record({SeekOffset - 1, AbsoluteOffset, StoreID,
+            invalidate_bad_data_record({AbsoluteOffset, ChunkSize, StoreID,
                     failed_to_read_chunk_data_path}),
             {error, chunk_not_found};
         {error, Error} ->
@@ -1827,33 +1827,25 @@ read_chunk_with_metadata(
         end
     end.
 
-invalidate_bad_data_record({Byte, AbsoluteEndOffset, StoreID, Type}) ->
-    case AbsoluteEndOffset - Byte =< ?DATA_CHUNK_SIZE of
+invalidate_bad_data_record({AbsoluteEndOffset, ChunkSize, StoreID, Type}) ->
+    [{_, T}] = ets:lookup(ar_data_sync_state, disk_pool_threshold),
+    case AbsoluteEndOffset > T of
         true ->
             [{_, T}] = ets:lookup(ar_data_sync_state, disk_pool_threshold),
             case AbsoluteEndOffset > T of
                 true ->
                     %% Do not invalidate fresh records - a reorg may be in progress.
                     ok;
                 false ->
-                    invalidate_bad_data_record2({Byte, AbsoluteEndOffset, StoreID, Type})
+                    invalidate_bad_data_record2({AbsoluteEndOffset, ChunkSize, StoreID, Type})
             end;
         false ->
-            ?LOG_WARNING([{event, bad_offset_while_invalidating_data_record}, {type, Type},
-                {range_start, Byte}, {range_end, AbsoluteEndOffset}, {store_id, StoreID}]),
-            ok
+            invalidate_bad_data_record2({AbsoluteEndOffset, ChunkSize, StoreID, Type})
     end.
 
-invalidate_bad_data_record2({Byte, AbsoluteEndOffset, StoreID, Type}) ->
+invalidate_bad_data_record2({AbsoluteEndOffset, ChunkSize, StoreID, Type}) ->
     PaddedEndOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset),
-    MaybePaddedStartOffset = ar_block:get_chunk_padded_offset(Byte),
-    StartOffset =
-        case MaybePaddedStartOffset == PaddedEndOffset of
-            true ->
-                PaddedEndOffset - ?DATA_CHUNK_SIZE;
-            false ->
-                MaybePaddedStartOffset
-        end,
+    StartOffset = AbsoluteEndOffset - ChunkSize,
     ?LOG_WARNING([{event, invalidating_bad_data_record}, {type, Type},
         {range_start, StartOffset}, {range_end, PaddedEndOffset},
         {store_id, StoreID}]),
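The rewritten invalidate_bad_data_record/1 above drops the old range-size guard in favour of a single disk-pool-threshold check, and derives the invalidated range directly from the chunk size. A condensed sketch of the resulting decision flow, with maybe_invalidate/5 and the DiskPoolThreshold parameter as illustrative names only (the committed code reads the threshold from the ar_data_sync_state ETS table):

%% Illustration only: records ending above the disk pool threshold are treated as fresh
%% and left alone because a reorg may still be in progress; older records are invalidated
%% over the exact range (AbsoluteEndOffset - ChunkSize, padded end offset].
maybe_invalidate(AbsoluteEndOffset, ChunkSize, StoreID, Type, DiskPoolThreshold) ->
    case AbsoluteEndOffset > DiskPoolThreshold of
        true -> ok;
        false -> invalidate_bad_data_record2({AbsoluteEndOffset, ChunkSize, StoreID, Type})
    end.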
@@ -1876,24 +1868,24 @@ invalidate_bad_data_record2({Byte, AbsoluteEndOffset, StoreID, Type}) ->
 
 remove_invalid_sync_records(PaddedEndOffset, StartOffset, StoreID) ->
     Remove1 = ar_sync_record:delete(PaddedEndOffset, StartOffset, ar_data_sync, StoreID),
-    IsSmallChunk = PaddedEndOffset - StartOffset < ?DATA_CHUNK_SIZE,
+    IsSmallChunkBeforeThreshold = PaddedEndOffset - StartOffset < ?DATA_CHUNK_SIZE,
     Remove2 =
-        case {Remove1, IsSmallChunk} of
+        case {Remove1, IsSmallChunkBeforeThreshold} of
             {ok, false} ->
                 ar_sync_record:delete(PaddedEndOffset, StartOffset,
                         ar_chunk_storage, StoreID);
             _ ->
                 Remove1
         end,
     Remove3 =
-        case {Remove2, IsSmallChunk} of
+        case {Remove2, IsSmallChunkBeforeThreshold} of
             {ok, false} ->
                 ar_sync_record:delete(PaddedEndOffset, StartOffset,
                         ar_chunk_storage_replica_2_9_1_entropy, StoreID);
             _ ->
                 Remove2
         end,
-    case {Remove3, IsSmallChunk} of
+    case {Remove3, IsSmallChunkBeforeThreshold} of
         {ok, false} ->
             ar_sync_record:delete(PaddedEndOffset, StartOffset,
                     ar_chunk_storage_replica_2_9_1_unpacked, StoreID);
@@ -1922,16 +1914,15 @@ validate_fetched_chunk(Args) ->
                 false ->
                     log_chunk_error(RequestOrigin, failed_to_validate_chunk_proofs,
                             [{absolute_end_offset, Offset}, {store_id, StoreID}]),
-                    StartOffset = Offset - ChunkSize,
-                    invalidate_bad_data_record({StartOffset, Offset, StoreID,
+                    invalidate_bad_data_record({Offset, ChunkSize, StoreID,
                             failed_to_validate_chunk_proofs}),
                     false
             end;
         {_BlockStart, _BlockEnd, TXRoot2} ->
             log_chunk_error(stored_chunk_invalid_tx_root,
                     [{end_offset, Offset}, {tx_root, ar_util:encode(TXRoot2)},
                     {stored_tx_root, ar_util:encode(TXRoot)}, {store_id, StoreID}]),
-            invalidate_bad_data_record({Offset - ChunkSize, Offset, StoreID,
+            invalidate_bad_data_record({Offset, ChunkSize, StoreID,
                     stored_chunk_invalid_tx_root}),
             false
     end
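Every call site touched by this commit (get_chunk, read_chunk_with_metadata, validate_fetched_chunk) now passes {Offset, ChunkSize, ...} rather than a byte/end-offset pair, so the start of the invalidated range is computed the same way everywhere. A side-by-side sketch of the old and new start-offset computation, restated from the invalidate_bad_data_record2 hunk above for comparison only:

%% Old form: pad the caller-supplied byte offset and special-case a collision with the
%% padded end offset by stepping back one full ?DATA_CHUNK_SIZE.
old_start(Byte, AbsoluteEndOffset) ->
    PaddedEndOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset),
    case ar_block:get_chunk_padded_offset(Byte) of
        PaddedEndOffset -> PaddedEndOffset - ?DATA_CHUNK_SIZE;
        PaddedStartOffset -> PaddedStartOffset
    end.

%% New form: no padding heuristics needed because the caller passes the exact chunk size.
new_start(AbsoluteEndOffset, ChunkSize) ->
    AbsoluteEndOffset - ChunkSize.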