Fix uninitialized reads in parquet chunked reader #17810

cpp/src/io/parquet/reader_impl_chunking.cu (12 changes: 10 additions & 2 deletions)
@@ -474,8 +474,16 @@ adjust_cumulative_sizes(device_span<cumulative_page_info const> c_info,
                                 .second;
 
   size_t const num_unique_keys = key_offsets_end - key_offsets.begin();
-  thrust::exclusive_scan(
-    rmm::exec_policy_nosync(stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin());
+  auto key_offset_iter = cudf::detail::make_counting_transform_iterator(
+    0,
+    cuda::proclaim_return_type<size_type>(
+      [num_unique_keys, key_offsets = key_offsets.begin()] __device__(size_type i) {
+        return i >= num_unique_keys ? 0 : key_offsets[i];
+      }));
+  thrust::exclusive_scan(rmm::exec_policy_nosync(stream),
+                         key_offset_iter,
+                         key_offset_iter + num_unique_keys + 1,
+                         key_offsets.begin());
 
   // adjust the cumulative info such that for each row count, the size includes any pages that span
   // that row count. this is so that if we have this case:
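
A note on the pattern above: the earlier thrust::reduce_by_key in this function only writes the first num_unique_keys entries of key_offsets, so the old call, which scanned all the way to key_offsets.end(), read uninitialized device memory. The replacement scans exactly num_unique_keys + 1 elements through a counting transform iterator that returns key_offsets[i] inside the valid range and 0 beyond it, so the trailing "total" slot of the exclusive scan stays well defined. Below is a minimal standalone sketch of that zero-padding idea in plain Thrust (not part of the PR); the functor and variable names (zero_pad, counts, num_valid) are illustrative and not taken from cuDF.

// Sketch (not from the PR): zero-padded exclusive scan over a partially-filled buffer.
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <cstdio>

// Returns values[i] inside the valid range and 0 past it, so a scan over
// num_valid + 1 inputs never reads the uninitialized tail of the buffer.
struct zero_pad {
  int const* values;
  int num_valid;
  __host__ __device__ int operator()(int i) const { return i < num_valid ? values[i] : 0; }
};

int main()
{
  // Only the first 3 of 8 allocated entries are ever written, mirroring a
  // reduce_by_key output that fills just num_valid slots.
  thrust::device_vector<int> counts(8);
  int const num_valid = 3;
  counts[0] = 4; counts[1] = 4; counts[2] = 4;

  auto padded = thrust::make_transform_iterator(
    thrust::make_counting_iterator(0),
    zero_pad{thrust::raw_pointer_cast(counts.data()), num_valid});

  // Scan num_valid + 1 padded inputs; the padded trailing zero makes the last output the total.
  thrust::device_vector<int> offsets(num_valid + 1);
  thrust::exclusive_scan(thrust::device, padded, padded + num_valid + 1, offsets.begin());

  for (int i = 0; i <= num_valid; ++i) { std::printf("%d ", static_cast<int>(offsets[i])); }
  // prints: 0 4 8 12
  return 0;
}

An alternative would be to zero-fill the tail of the buffer before scanning, but rmm::device_uvector deliberately leaves its storage uninitialized, and the padded iterator avoids the extra fill kernel.
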
cpp/src/io/parquet/reader_impl_preprocess.cu (18 changes: 12 additions & 6 deletions)
@@ -615,10 +615,10 @@ void decode_page_headers(pass_intermediate_data& pass,
   pass.pages = sort_pages(unsorted_pages, pass.chunks, stream);
 
   // compute offsets to each group of input pages.
-  // page_keys: 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
-  //
-  // result: 0, 4, 8
-  rmm::device_uvector<size_type> page_counts(pass.pages.size() + 1, stream);
+  // page_keys:    1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
+  // page_counts:  4, 4, 4
+  // page_offsets: 0, 4, 8, 12 //End
+  rmm::device_uvector<size_type> page_counts(pass.pages.size(), stream);
   auto page_keys = make_page_key_iterator(pass.pages);
   auto const page_counts_end = thrust::reduce_by_key(rmm::exec_policy(stream),
                                                      page_keys,
@@ -629,9 +629,15 @@
                                  .second;
   auto const num_page_counts = page_counts_end - page_counts.begin();
   pass.page_offsets = rmm::device_uvector<size_type>(num_page_counts + 1, stream);
+  auto page_count_iter = cudf::detail::make_counting_transform_iterator(
+    0,
+    cuda::proclaim_return_type<size_type>(
+      [num_page_counts, page_counts = page_counts.begin()] __device__(size_type i) {
+        return i >= num_page_counts ? 0 : page_counts[i];
+      }));
   thrust::exclusive_scan(rmm::exec_policy_nosync(stream),
-                         page_counts.begin(),
-                         page_counts.begin() + num_page_counts + 1,
+                         page_count_iter,
+                         page_count_iter + num_page_counts + 1,
                          pass.page_offsets.begin());
 
   // setup dict_page for each chunk if necessary
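
The preprocessing change applies the same recipe, and the rewritten comment makes the intended data flow explicit: reduce_by_key collapses each run of equal page keys into a count, and an exclusive scan over those counts plus one padded zero produces the page offsets, whose last entry is the total number of pages. A self-contained sketch of that pipeline with the example values from the comment (plain Thrust again, not part of the PR; the vector and functor names are illustrative, not cuDF's):

// Sketch (not from the PR): page_keys -> page_counts -> page_offsets with a padded scan.
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <cstdio>
#include <vector>

// Same zero-padding idea as above: read counts[i] in range, 0 past the end.
struct zero_pad {
  int const* counts;
  int num_counts;
  __host__ __device__ int operator()(int i) const { return i < num_counts ? counts[i] : 0; }
};

int main()
{
  // page_keys: 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
  std::vector<int> h_keys{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3};
  thrust::device_vector<int> page_keys(h_keys.begin(), h_keys.end());

  // Sized without the extra "+ 1" element; the scan input is padded instead.
  thrust::device_vector<int> page_counts(page_keys.size());
  thrust::device_vector<int> unique_keys(page_keys.size());

  // Count the run length of each key: page_counts = 4, 4, 4
  auto const ends = thrust::reduce_by_key(thrust::device,
                                          page_keys.begin(),
                                          page_keys.end(),
                                          thrust::make_constant_iterator(1),
                                          unique_keys.begin(),
                                          page_counts.begin());
  int const num_counts = static_cast<int>(ends.second - page_counts.begin());

  // Exclusive scan over the padded counts: page_offsets = 0, 4, 8, 12
  thrust::device_vector<int> page_offsets(num_counts + 1);
  auto padded = thrust::make_transform_iterator(
    thrust::make_counting_iterator(0),
    zero_pad{thrust::raw_pointer_cast(page_counts.data()), num_counts});
  thrust::exclusive_scan(thrust::device, padded, padded + num_counts + 1, page_offsets.begin());

  for (int i = 0; i <= num_counts; ++i) { std::printf("%d ", static_cast<int>(page_offsets[i])); }
  return 0;
}
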