Skip to content

Commit

Permalink
apacheGH-38090: [C++][Emscripten] compute/kernels/scalar_if_else: Suppress shorten-64-to-32 warnings
Browse files Browse the repository at this point in the history

We need explicit casts from `int64_t` to `size_t` on Emscripten, where `size_t` is 32-bit and the implicit narrowing triggers shorten-64-to-32 warnings.

Explicit casts.
  • Loading branch information
kou committed Oct 6, 2023
1 parent 3697bcd commit 2797486
Showing 1 changed file with 40 additions and 30 deletions.
70 changes: 40 additions & 30 deletions cpp/src/arrow/compute/kernels/scalar_if_else.cc
Original file line number Diff line number Diff line change
Expand Up @@ -437,7 +437,7 @@ struct IfElseFunctor<Type,
/*CopyArrayData*/
[&](const ArraySpan& valid_array, ArraySpan* out_array) {
std::memcpy(out_array->GetValues<T>(1), valid_array.GetValues<T>(1),
valid_array.length * sizeof(T));
static_cast<size_t>(valid_array.length) * sizeof(T));
},
/*BroadcastScalar*/
[&](const Scalar& scalar, ArraySpan* out_array) {
Expand All @@ -453,13 +453,14 @@ struct IfElseFunctor<Type,
T* out_values = out->array_span_mutable()->GetValues<T>(1);

// copy right data to out_buff
std::memcpy(out_values, right.GetValues<T>(1), right.length * sizeof(T));
std::memcpy(out_values, right.GetValues<T>(1),
static_cast<size_t>(right.length) * sizeof(T));

// selectively copy values from left data
const T* left_data = left.GetValues<T>(1);
RunIfElseLoop(cond, [&](int64_t data_offset, int64_t num_elems) {
std::memcpy(out_values + data_offset, left_data + data_offset,
num_elems * sizeof(T));
static_cast<size_t>(num_elems) * sizeof(T));
});

return Status::OK();
Expand All @@ -471,7 +472,8 @@ struct IfElseFunctor<Type,
T* out_values = out->array_span_mutable()->GetValues<T>(1);

// copy right data to out_buff
std::memcpy(out_values, right.GetValues<T>(1), right.length * sizeof(T));
std::memcpy(out_values, right.GetValues<T>(1),
static_cast<size_t>(right.length) * sizeof(T));

if (!left.is_valid) { // left is null scalar, only need to copy right data to output
return Status::OK();
Expand All @@ -495,7 +497,7 @@ struct IfElseFunctor<Type,

// copy left data to out_buff
const T* left_data = left.GetValues<T>(1);
std::memcpy(out_values, left_data, left.length * sizeof(T));
std::memcpy(out_values, left_data, static_cast<size_t>(left.length) * sizeof(T));

if (!right.is_valid) { // right is null scalar, only need to copy left data to output
return Status::OK();
Expand Down Expand Up @@ -741,11 +743,13 @@ struct IfElseFunctor<Type, enable_if_base_binary<Type>> {
auto* out_data = out->array_data().get();
auto offset_length = (cond.length + 1) * sizeof(OffsetType);
ARROW_ASSIGN_OR_RAISE(out_data->buffers[1], ctx->Allocate(offset_length));
std::memcpy(out_data->buffers[1]->mutable_data(), right_offsets, offset_length);
std::memcpy(out_data->buffers[1]->mutable_data(), right_offsets,
static_cast<size_t>(offset_length));

auto right_data_length = right_offsets[right.length] - right_offsets[0];
ARROW_ASSIGN_OR_RAISE(out_data->buffers[2], ctx->Allocate(right_data_length));
std::memcpy(out_data->buffers[2]->mutable_data(), right_data, right_data_length);
std::memcpy(out_data->buffers[2]->mutable_data(), right_data,
static_cast<size_t>(right_data_length));
return Status::OK();
}

Expand Down Expand Up @@ -781,11 +785,13 @@ struct IfElseFunctor<Type, enable_if_base_binary<Type>> {
auto* out_data = out->array_data().get();
auto offset_length = (cond.length + 1) * sizeof(OffsetType);
ARROW_ASSIGN_OR_RAISE(out_data->buffers[1], ctx->Allocate(offset_length));
std::memcpy(out_data->buffers[1]->mutable_data(), left_offsets, offset_length);
std::memcpy(out_data->buffers[1]->mutable_data(), left_offsets,
static_cast<size_t>(offset_length));

auto left_data_length = left_offsets[left.length] - left_offsets[0];
ARROW_ASSIGN_OR_RAISE(out_data->buffers[2], ctx->Allocate(left_data_length));
std::memcpy(out_data->buffers[2]->mutable_data(), left_data, left_data_length);
std::memcpy(out_data->buffers[2]->mutable_data(), left_data,
static_cast<size_t>(left_data_length));
return Status::OK();
}

Expand Down Expand Up @@ -874,14 +880,15 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {
[&](const ArraySpan& valid_array, ArraySpan* out_array) {
std::memcpy(out_array->buffers[1].data + out_array->offset * byte_width,
valid_array.buffers[1].data + valid_array.offset * byte_width,
valid_array.length * byte_width);
static_cast<size_t>(valid_array.length * byte_width));
},
/*BroadcastScalar*/
[&](const Scalar& scalar, ArraySpan* out_array) {
const uint8_t* scalar_data = UnboxBinaryScalar(scalar);
uint8_t* start = out_array->buffers[1].data + out_array->offset * byte_width;
for (int64_t i = 0; i < out_array->length; i++) {
std::memcpy(start + i * byte_width, scalar_data, byte_width);
std::memcpy(start + i * byte_width, scalar_data,
static_cast<size_t>(byte_width));
}
});
}
Expand All @@ -896,14 +903,15 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {

// copy right data to out_buff
const uint8_t* right_data = right.buffers[1].data + right.offset * byte_width;
std::memcpy(out_values, right_data, right.length * byte_width);
std::memcpy(out_values, right_data, static_cast<size_t>(right.length * byte_width));

// selectively copy values from left data
const uint8_t* left_data = left.buffers[1].data + left.offset * byte_width;

RunIfElseLoop(cond, [&](int64_t data_offset, int64_t num_elems) {
std::memcpy(out_values + data_offset * byte_width,
left_data + data_offset * byte_width, num_elems * byte_width);
left_data + data_offset * byte_width,
static_cast<size_t>(num_elems * byte_width));
});

return Status::OK();
Expand All @@ -919,15 +927,16 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {

// copy right data to out_buff
const uint8_t* right_data = right.buffers[1].data + right.offset * byte_width;
std::memcpy(out_values, right_data, right.length * byte_width);
std::memcpy(out_values, right_data, static_cast<size_t>(right.length * byte_width));

// selectively copy values from left data
const uint8_t* left_data = UnboxBinaryScalar(left);

RunIfElseLoop(cond, [&](int64_t data_offset, int64_t num_elems) {
if (left_data) {
for (int64_t i = 0; i < num_elems; i++) {
std::memcpy(out_values + (data_offset + i) * byte_width, left_data, byte_width);
std::memcpy(out_values + (data_offset + i) * byte_width, left_data,
static_cast<size_t>(byte_width));
}
}
});
Expand All @@ -945,15 +954,15 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {

// copy left data to out_buff
const uint8_t* left_data = left.buffers[1].data + left.offset * byte_width;
std::memcpy(out_values, left_data, left.length * byte_width);
std::memcpy(out_values, left_data, static_cast<size_t>(left.length * byte_width));

const uint8_t* right_data = UnboxBinaryScalar(right);

RunIfElseLoopInverted(cond, [&](int64_t data_offset, int64_t num_elems) {
if (right_data) {
for (int64_t i = 0; i < num_elems; i++) {
std::memcpy(out_values + (data_offset + i) * byte_width, right_data,
byte_width);
static_cast<size_t>(byte_width));
}
}
});
Expand All @@ -973,7 +982,8 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {
const uint8_t* right_data = UnboxBinaryScalar(right);
if (right_data) {
for (int64_t i = 0; i < cond.length; i++) {
std::memcpy(out_values + i * byte_width, right_data, byte_width);
std::memcpy(out_values + i * byte_width, right_data,
static_cast<size_t>(byte_width));
}
}

Expand All @@ -982,7 +992,8 @@ struct IfElseFunctor<Type, enable_if_fixed_size_binary<Type>> {
RunIfElseLoop(cond, [&](int64_t data_offset, int64_t num_elems) {
if (left_data) {
for (int64_t i = 0; i < num_elems; i++) {
std::memcpy(out_values + (data_offset + i) * byte_width, left_data, byte_width);
std::memcpy(out_values + (data_offset + i) * byte_width, left_data,
static_cast<size_t>(byte_width));
}
}
});
Expand Down Expand Up @@ -1547,7 +1558,7 @@ Status ExecArrayCaseWhen(KernelContext* ctx, const ExecSpan& batch, ExecResult*
// Allocate a temporary bitmap to determine which elements still need setting.
ARROW_ASSIGN_OR_RAISE(auto mask_buffer, ctx->AllocateBitmap(batch.length));
uint8_t* mask = mask_buffer->mutable_data();
std::memset(mask, 0xFF, mask_buffer->size());
std::memset(mask, 0xFF, static_cast<size_t>(mask_buffer->size()));

// Then iterate through each argument in turn and set elements.
for (int i = 0; i < batch.num_values() - (have_else_arg ? 2 : 1); i++) {
Expand Down Expand Up @@ -1620,7 +1631,7 @@ Status ExecArrayCaseWhen(KernelContext* ctx, const ExecSpan& batch, ExecResult*
bit_util::SetBitsTo(out_values, out_offset + offset, block.length, false);
} else {
std::memset(out_values + (out_offset + offset) * byte_width, 0x00,
byte_width * block.length);
static_cast<size_t>(byte_width * block.length));
}
} else if (!block.NoneSet()) {
for (int64_t j = 0; j < block.length; ++j) {
Expand All @@ -1629,7 +1640,7 @@ Status ExecArrayCaseWhen(KernelContext* ctx, const ExecSpan& batch, ExecResult*
bit_util::ClearBit(out_values, out_offset + offset + j);
} else {
std::memset(out_values + (out_offset + offset + j) * byte_width, 0x00,
byte_width);
static_cast<size_t>(byte_width));
}
}
}
Expand Down Expand Up @@ -1707,23 +1718,22 @@ static Status ExecVarWidthArrayCaseWhenImpl(

for (int64_t row = 0; row < batch.length; row++) {
int64_t selected = have_else_arg ? (batch.num_values() - 1) : -1;
for (int64_t arg = 0; static_cast<size_t>(arg) < conds_array.child_data.size();
arg++) {
for (size_t arg = 0; arg < conds_array.child_data.size(); arg++) {
const ArraySpan& cond_array = conds_array.child_data[arg];
if ((cond_array.buffers[0].data == nullptr ||
bit_util::GetBit(cond_array.buffers[0].data,
conds_array.offset + cond_array.offset + row)) &&
bit_util::GetBit(cond_array.buffers[1].data,
conds_array.offset + cond_array.offset + row)) {
selected = arg + 1;
selected = static_cast<int64_t>(arg) + 1;
break;
}
}
if (selected < 0) {
RETURN_NOT_OK(raw_builder->AppendNull());
continue;
}
const ExecValue& source = batch[selected];
const ExecValue& source = batch[static_cast<size_t>(selected)];
if (source.is_scalar()) {
const Scalar& scalar = *source.scalar;
if (!scalar.is_valid) {
Expand Down Expand Up @@ -2017,7 +2027,7 @@ void InitializeNullSlots(const DataType& type, uint8_t* out_valid, uint8_t* out_
bit_util::SetBitsTo(out_values, out_offset + offset, run.length, false);
} else {
std::memset(out_values + (out_offset + offset) * byte_width, 0,
byte_width * run.length);
static_cast<size_t>(byte_width * run.length));
}
}
offset += run.length;
Expand Down Expand Up @@ -2522,7 +2532,7 @@ Status ExecArrayChoose(KernelContext* ctx, const ExecSpan& batch, ExecResult* ou
if (index < 0 || (index + 1) >= batch.num_values()) {
return Status::IndexError("choose: index ", index, " out of range");
}
const auto& source = batch.values[index + 1];
const auto& source = batch.values[static_cast<size_t>(index + 1)];
CopyOneValue<Type>(source, row, out_valid, out_values, out_offset + row);
row++;
return Status::OK();
Expand Down Expand Up @@ -2575,7 +2585,7 @@ struct ChooseFunctor<Type, enable_if_base_binary<Type>> {
if (index < 0 || (index + 1) >= batch.num_values()) {
return Status::IndexError("choose: index ", index, " out of range");
}
const ExecValue& source = batch.values[index + 1];
const ExecValue& source = batch.values[static_cast<size_t>(index + 1)];
if (source.is_scalar()) {
ARROW_ASSIGN_OR_RAISE(
std::shared_ptr<Array> temp_array,
Expand Down Expand Up @@ -2618,7 +2628,7 @@ struct ChooseFunctor<Type, enable_if_base_binary<Type>> {
if (index < 0 || static_cast<size_t>(index + 1) >= batch.values.size()) {
return Status::IndexError("choose: index ", index, " out of range");
}
const auto& source = batch.values[index + 1];
const auto& source = batch.values[static_cast<size_t>(index + 1)];
return CopyValue(source, &builder, row++);
},
[&]() {
Expand Down

0 comments on commit 2797486

Please sign in to comment.