CHANGELOG.adoc (1 addition, 1 deletion)
@@ -23,7 +23,7 @@ endif::[]
 == 0.5.3.3
 
 === Fixes
-
+* fix: make committed offset accurate when partition assigned to avoid offset reset (#894)
 * fix: close parallel consumer on transactional mode when InvalidPidMappingException (#830)
 * fix: support kafka-clients 3.9.0 (#841)
 * fix: Paused consumption across multiple consumers (#857)

README.adoc (1 addition, 1 deletion)
@@ -1543,7 +1543,7 @@ endif::[]
 == 0.5.3.3
 
 === Fixes
-
+* fix: make committed offset accurate when partition assigned to avoid offset reset (#894)
 * fix: close parallel consumer on transactional mode when InvalidPidMappingException (#830)
 * fix: support kafka-clients 3.9.0 (#841)
 * fix: Paused consumption across multiple consumers (#857)

@@ -1,9 +1,10 @@
 package io.confluent.parallelconsumer.state;
 
 /*-
- * Copyright (C) 2020-2024 Confluent, Inc.
+ * Copyright (C) 2020-2025 Confluent, Inc.
  */
 
+import io.confluent.parallelconsumer.ParallelConsumer;
 import io.confluent.parallelconsumer.internal.BrokerPollSystem;
 import io.confluent.parallelconsumer.internal.EpochAndRecordsMap;
 import io.confluent.parallelconsumer.internal.PCModule;
@@ -410,8 +411,11 @@ public Optional<OffsetAndMetadata> getCommitDataIfDirty() {

     // visible for testing
     protected OffsetAndMetadata createOffsetAndMetadata() {
-        Optional<String> payloadOpt = tryToEncodeOffsets();
-        long nextOffset = getOffsetToCommit();
+        // use a tuple to make sure getOffsetToCommit is invoked only once, to avoid a dirty read
+        // committing the wrong offset
+        ParallelConsumer.Tuple<Optional<String>, Long> tuple = tryToEncodeOffsets();
+        Optional<String> payloadOpt = tuple.getLeft();
+        long nextOffset = tuple.getRight();
         return payloadOpt
                 .map(encodedOffsets -> new OffsetAndMetadata(nextOffset, encodedOffsets))
                 .orElseGet(() -> new OffsetAndMetadata(nextOffset));
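
Note on the dirty read called out in the comment above: in the old code, tryToEncodeOffsets() read getOffsetToCommit() internally while building the payload, and createOffsetAndMetadata() then read it a second time for the commit offset. Worker threads can complete more records between those two reads, so a commit could pair a payload built for one offset with a newer committed offset. Below is a minimal, self-contained sketch of that race; every name in it is illustrative, not taken from the real parallel-consumer internals.

import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch only: two reads of a concurrently-advancing value can disagree.
class DirtyReadSketch {
    // stands in for the partition's "next offset to commit", advanced by worker threads
    static final AtomicLong offsetToCommit = new AtomicLong(100);

    public static void main(String[] args) {
        // read #1: the metadata payload is encoded relative to offset 100
        long payloadBase = offsetToCommit.get();
        Optional<String> payload = Optional.of("encoded-from-" + payloadBase);

        // ...meanwhile a worker completes records 100..104...
        offsetToCommit.set(105);

        // read #2: the offset actually committed is now 105
        long committedOffset = offsetToCommit.get();

        // inconsistent pair: the metadata describes offset 100, the commit points at 105
        System.out.printf("payload=%s, committed=%d%n", payload.get(), committedOffset);
    }
}

Snapshotting the offset once and returning it alongside the payload keeps both halves of the commit describing the same moment.
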
@@ -481,29 +485,30 @@ public long getOffsetHighestSequentialSucceeded() {
      *
      * @return if possible, the String encoded offset map
      */
-    private Optional<String> tryToEncodeOffsets() {
+    private ParallelConsumer.Tuple<Optional<String>, Long> tryToEncodeOffsets() {
+        long offsetOfNextExpectedMessage = getOffsetToCommit();
+
         if (incompleteOffsets.isEmpty()) {
             setAllowedMoreRecords(true);
-            return empty();
+            return ParallelConsumer.Tuple.pairOf(empty(), offsetOfNextExpectedMessage);
         }
 
         try {
             // todo refactor use of null shouldn't be needed. Is OffsetMapCodecManager stateful? remove null #233
-            long offsetOfNextExpectedMessage = getOffsetToCommit();
             var offsetRange = getOffsetHighestSucceeded() - offsetOfNextExpectedMessage;
             String offsetMapPayload = om.makeOffsetMetadataPayload(offsetOfNextExpectedMessage, this);
             ratioPayloadUsedDistributionSummary.record(offsetMapPayload.length() / (double) offsetRange);
             ratioMetadataSpaceUsedDistributionSummary.record(offsetMapPayload.length() / (double) OffsetMapCodecManager.DefaultMaxMetadataSize);
             boolean mustStrip = updateBlockFromEncodingResult(offsetMapPayload);
             if (mustStrip) {
-                return empty();
+                return ParallelConsumer.Tuple.pairOf(empty(), offsetOfNextExpectedMessage);
             } else {
-                return of(offsetMapPayload);
+                return ParallelConsumer.Tuple.pairOf(of(offsetMapPayload), offsetOfNextExpectedMessage);
             }
         } catch (NoEncodingPossibleException e) {
             setAllowedMoreRecords(false);
             log.warn("No encodings could be used to encode the offset map, skipping. Warning: messages might be replayed on rebalance.", e);
-            return empty();
+            return ParallelConsumer.Tuple.pairOf(empty(), offsetOfNextExpectedMessage);
         }
     }
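
For readers unfamiliar with ParallelConsumer.Tuple: going only by the calls visible in this diff (pairOf, getLeft, getRight), it behaves as a simple immutable pair, so the new return value can be consumed as sketched below. This is a hedged illustration, not code from the PR.

import io.confluent.parallelconsumer.ParallelConsumer;

import java.util.Optional;

class TupleUsageSketch {
    public static void main(String[] args) {
        // pair the encoded payload with the offset snapshot it was built from
        ParallelConsumer.Tuple<Optional<String>, Long> result =
                ParallelConsumer.Tuple.pairOf(Optional.of("payload"), 42L);

        Optional<String> payloadOpt = result.getLeft(); // encoded offset map, if present
        long nextOffset = result.getRight();            // offset snapshot, read exactly once

        System.out.println(payloadOpt.orElse("<none>") + " @ " + nextOffset);
    }
}

Carrying the offset inside the same return value is what lets the single read in tryToEncodeOffsets() reach createOffsetAndMetadata() without a second call that could observe a newer value than the one the payload was built from.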
