Use of org.apache.kafka.raft.errors.NotLeaderException in project kafka by apache.
In the class KafkaRaftClient, the method append:
private long append(int epoch, List<T> records, boolean isAtomic) {
    LeaderState<T> leaderState = quorum.<T>maybeLeaderState().orElseThrow(
        () -> new NotLeaderException("Append failed because the replication is not the current leader"));
    BatchAccumulator<T> accumulator = leaderState.accumulator();
    boolean isFirstAppend = accumulator.isEmpty();
    final long offset;
    if (isAtomic) {
        offset = accumulator.appendAtomic(epoch, records);
    } else {
        offset = accumulator.append(epoch, records);
    }
    // Wake up the network channel if this is the first append or the
    // accumulator is ready to drain now. Checking for the first append
    // gives the IO thread a chance to observe the linger timeout and
    // schedule its own wakeup in case there are no additional appends.
    if (isFirstAppend || accumulator.needsDrain(time.milliseconds())) {
        wakeup();
    }
    return offset;
}
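NotLeaderException propagates from this private append up to whichever public scheduling method the caller used, signalling that this node has lost (or never held) leadership. The sketch below is only an illustration of how a caller might react: LeaderAwareAppender, appendWithRetry, and the linear backoff are assumptions introduced here, not part of Kafka; the LongSupplier stands in for whatever public append entry point the caller wraps.

import java.util.function.LongSupplier;
import org.apache.kafka.raft.errors.NotLeaderException;

public final class LeaderAwareAppender {
    // Hypothetical helper: retries an append a few times when the local
    // replica is not the current leader, e.g. while leadership is settling.
    public static long appendWithRetry(LongSupplier appendCall, int maxAttempts)
            throws InterruptedException {
        NotLeaderException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                // appendCall wraps the real append call made against the Raft client.
                return appendCall.getAsLong();
            } catch (NotLeaderException e) {
                // Not the leader right now; back off and try again (illustrative policy).
                last = e;
                Thread.sleep(100L * attempt);
            }
        }
        throw last;
    }
}

In practice a caller might instead redirect the append to the current leader rather than retry locally; the point is only that NotLeaderException is an unchecked exception the caller is expected to handle.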
Use of org.apache.kafka.raft.errors.NotLeaderException in project kafka by apache.
In the class BatchAccumulator, the method append:
private long append(int epoch, List<T> records, boolean isAtomic) {
    // Reject appends tagged with a stale leader epoch; a newer epoch is a caller bug.
    if (epoch < this.epoch) {
        throw new NotLeaderException("Append failed because the epoch doesn't match");
    } else if (epoch > this.epoch) {
        throw new IllegalArgumentException("Attempt to append from epoch " + epoch +
            " which is larger than the current epoch " + this.epoch);
    }
    ObjectSerializationCache serializationCache = new ObjectSerializationCache();
    appendLock.lock();
    try {
        maybeCompleteDrain();
        BatchBuilder<T> batch = null;
        if (isAtomic) {
            // Atomic appends must fit into a single batch allocated up front.
            batch = maybeAllocateBatch(records, serializationCache);
        }
        for (T record : records) {
            if (!isAtomic) {
                batch = maybeAllocateBatch(Collections.singleton(record), serializationCache);
            }
            if (batch == null) {
                throw new BufferAllocationException("Append failed because we failed to allocate memory to write the batch");
            }
            batch.appendRecord(record, serializationCache);
            nextOffset += 1;
        }
        maybeResetLinger();
        return nextOffset - 1;
    } finally {
        appendLock.unlock();
    }
}
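The epoch check at the top of this method is what maps a stale leader epoch onto NotLeaderException: an append carrying an older epoch means the caller is no longer the leader the accumulator is tracking, while a newer epoch indicates a programming error. Below is a stripped-down sketch of that validation pattern; EpochValidator is a name introduced here for illustration, with the messages and comparison logic taken from the snippet above.

import org.apache.kafka.raft.errors.NotLeaderException;

final class EpochValidator {
    private final int epoch; // current leader epoch tracked by the accumulator

    EpochValidator(int epoch) {
        this.epoch = epoch;
    }

    // Mirrors the check in BatchAccumulator#append: stale epochs are rejected
    // with NotLeaderException, future epochs indicate a caller bug.
    void validate(int appendEpoch) {
        if (appendEpoch < this.epoch) {
            throw new NotLeaderException("Append failed because the epoch doesn't match");
        } else if (appendEpoch > this.epoch) {
            throw new IllegalArgumentException("Attempt to append from epoch " + appendEpoch
                + " which is larger than the current epoch " + this.epoch);
        }
    }
}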