
Example 26 with ByteBufferOutputStream

Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project.

From class DefaultRecordTest, method testSerdeNoSequence.

@Test
public void testSerdeNoSequence() throws IOException {
    ByteBuffer key = ByteBuffer.wrap("hi".getBytes());
    ByteBuffer value = ByteBuffer.wrap("there".getBytes());
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, key, value, new Header[0]);
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    DefaultRecord record = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, RecordBatch.NO_SEQUENCE, null);
    assertNotNull(record);
    assertEquals(RecordBatch.NO_SEQUENCE, record.sequence());
}
Also used: ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), DataOutputStream (java.io.DataOutputStream), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.jupiter.api.Test)
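
The test above leans on a property worth spelling out: ByteBufferOutputStream grows its underlying buffer on demand, so the 1024 bytes passed to the constructor is only an initial capacity, and the buffer should be re-fetched via buffer() after writing. A minimal JUnit-style sketch of that behaviour (not taken from the Kafka test suite; the method name is invented for illustration):

@Test
public void byteBufferOutputStreamGrowsOnDemand() throws IOException {
    ByteBufferOutputStream out = new ByteBufferOutputStream(16);
    // write more than the initial capacity; the stream allocates a larger buffer internally
    out.write(new byte[64]);
    // re-fetch the buffer after writing, since growth replaces the original buffer
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    assertEquals(64, buffer.remaining());
}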

Example 27 with ByteBufferOutputStream

Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project.

From class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooHigh.

@Test
public void testBasicSerdeInvalidHeaderCountTooHigh() throws IOException {
    Header[] headers = new Header[] { new RecordHeader("foo", "value".getBytes()), new RecordHeader("bar", null), new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes()) };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // corrupt the header count so the record claims more headers than it actually contains
    buffer.put(14, (byte) 8);
    assertThrows(InvalidRecordException.class, () -> DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null));
}
Also used: RecordHeader (org.apache.kafka.common.header.internals.RecordHeader), Header (org.apache.kafka.common.header.Header), ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), DataOutputStream (java.io.DataOutputStream), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.jupiter.api.Test)
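
For context on the single-byte patch above: a v2 record stores its header count as a zigzag-encoded varint, and index 14 appears to be where that varint lands for this particular key and value. The raw byte 8 decodes to the value 4, one more header than the three actually written, which is why readFrom rejects the record. A minimal sketch of the encoding using Kafka's ByteUtils (the method name is invented for illustration):

@Test
public void headerCountVarintEncoding() {
    ByteBuffer buf = ByteBuffer.allocate(5);
    // zigzag varint encoding: the value 4 becomes the single byte 8, the byte the test patches in
    ByteUtils.writeVarint(4, buf);
    buf.flip();
    assertEquals(1, buf.remaining());
    assertEquals((byte) 8, buf.get(0));
}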

Example 28 with ByteBufferOutputStream

Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project.

From class MemoryRecords, method filterTo.

/**
 * Note: This method is also used to convert the first timestamp of the batch (which is usually the timestamp of the first record)
 * to the delete horizon of the tombstones or txn markers which are present in the batch.
 */
private static FilterResult filterTo(TopicPartition partition, Iterable<MutableRecordBatch> batches, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) {
    FilterResult filterResult = new FilterResult(destinationBuffer);
    ByteBufferOutputStream bufferOutputStream = new ByteBufferOutputStream(destinationBuffer);
    for (MutableRecordBatch batch : batches) {
        final BatchRetentionResult batchRetentionResult = filter.checkBatchRetention(batch);
        final boolean containsMarkerForEmptyTxn = batchRetentionResult.containsMarkerForEmptyTxn;
        final BatchRetention batchRetention = batchRetentionResult.batchRetention;
        filterResult.bytesRead += batch.sizeInBytes();
        if (batchRetention == BatchRetention.DELETE)
            continue;
        // We use the absolute offset to decide whether to retain the message or not. Due to KAFKA-4298, we have to
        // allow for the possibility that a previous version corrupted the log by writing a compressed record batch
        // with a magic value not matching the magic of the records (magic < 2). This will be fixed as we
        // recopy the messages to the destination buffer.
        byte batchMagic = batch.magic();
        List<Record> retainedRecords = new ArrayList<>();
        final BatchFilterResult iterationResult = filterBatch(batch, decompressionBufferSupplier, filterResult, filter, batchMagic, true, retainedRecords);
        boolean containsTombstones = iterationResult.containsTombstones;
        boolean writeOriginalBatch = iterationResult.writeOriginalBatch;
        long maxOffset = iterationResult.maxOffset;
        if (!retainedRecords.isEmpty()) {
            // we check if the delete horizon should be set to a new value
            // in which case, we need to reset the base timestamp and overwrite the timestamp deltas
            // if the batch does not contain tombstones, then we don't need to overwrite batch
            boolean needToSetDeleteHorizon = batch.magic() >= RecordBatch.MAGIC_VALUE_V2 && (containsTombstones || containsMarkerForEmptyTxn) && !batch.deleteHorizonMs().isPresent();
            if (writeOriginalBatch && !needToSetDeleteHorizon) {
                batch.writeTo(bufferOutputStream);
                filterResult.updateRetainedBatchMetadata(batch, retainedRecords.size(), false);
            } else {
                final MemoryRecordsBuilder builder;
                long deleteHorizonMs;
                if (needToSetDeleteHorizon)
                    deleteHorizonMs = filter.currentTime + filter.deleteRetentionMs;
                else
                    deleteHorizonMs = batch.deleteHorizonMs().orElse(RecordBatch.NO_TIMESTAMP);
                builder = buildRetainedRecordsInto(batch, retainedRecords, bufferOutputStream, deleteHorizonMs);
                MemoryRecords records = builder.build();
                int filteredBatchSize = records.sizeInBytes();
                if (filteredBatchSize > batch.sizeInBytes() && filteredBatchSize > maxRecordBatchSize)
                    log.warn("Record batch from {} with last offset {} exceeded max record batch size {} after cleaning " + "(new size is {}). Consumers with version earlier than 0.10.1.0 may need to " + "increase their fetch sizes.", partition, batch.lastOffset(), maxRecordBatchSize, filteredBatchSize);
                MemoryRecordsBuilder.RecordsInfo info = builder.info();
                filterResult.updateRetainedBatchMetadata(info.maxTimestamp, info.shallowOffsetOfMaxTimestamp, maxOffset, retainedRecords.size(), filteredBatchSize);
            }
        } else if (batchRetention == BatchRetention.RETAIN_EMPTY) {
            if (batchMagic < RecordBatch.MAGIC_VALUE_V2)
                throw new IllegalStateException("Empty batches are only supported for magic v2 and above");
            bufferOutputStream.ensureRemaining(DefaultRecordBatch.RECORD_BATCH_OVERHEAD);
            DefaultRecordBatch.writeEmptyHeader(bufferOutputStream.buffer(), batchMagic, batch.producerId(), batch.producerEpoch(), batch.baseSequence(), batch.baseOffset(), batch.lastOffset(), batch.partitionLeaderEpoch(), batch.timestampType(), batch.maxTimestamp(), batch.isTransactional(), batch.isControlBatch());
            filterResult.updateRetainedBatchMetadata(batch, 0, true);
        }
        // If we had to allocate a new buffer to fit the filtered buffer (see KAFKA-5316), return early to
        // avoid the need for additional allocations.
        ByteBuffer outputBuffer = bufferOutputStream.buffer();
        if (outputBuffer != destinationBuffer) {
            filterResult.outputBuffer = outputBuffer;
            return filterResult;
        }
    }
    return filterResult;
}
Also used: BatchRetention (org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetention), BatchRetentionResult (org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetentionResult), ArrayList (java.util.ArrayList), ByteBuffer (java.nio.ByteBuffer), ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), SnapshotFooterRecord (org.apache.kafka.common.message.SnapshotFooterRecord), SnapshotHeaderRecord (org.apache.kafka.common.message.SnapshotHeaderRecord)
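
The check at the end of filterTo, comparing bufferOutputStream.buffer() against destinationBuffer, is the KAFKA-5316 case: if the filtered batches did not fit in the caller-supplied buffer, ByteBufferOutputStream allocated a replacement, and the caller has to adopt it instead of the original. A minimal standalone sketch of that behaviour (the method name is invented for illustration):

@Test
public void byteBufferOutputStreamReallocatesWhenFull() throws IOException {
    ByteBuffer destination = ByteBuffer.allocate(8);
    ByteBufferOutputStream out = new ByteBufferOutputStream(destination);
    // writing past the destination's capacity forces the stream to allocate a larger buffer
    out.write(new byte[32]);
    // the stream's buffer is no longer the caller-supplied one, which is what filterTo detects
    assertNotSame(destination, out.buffer());
}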

Example 29 with ByteBufferOutputStream

Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project.

From class MemoryRecords, method withRecords.

public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType, TimestampType timestampType, long producerId, short producerEpoch, int baseSequence, int partitionLeaderEpoch, boolean isTransactional, SimpleRecord... records) {
    if (records.length == 0)
        return MemoryRecords.EMPTY;
    int sizeEstimate = AbstractRecords.estimateSizeInBytes(magic, compressionType, Arrays.asList(records));
    ByteBufferOutputStream bufferStream = new ByteBufferOutputStream(sizeEstimate);
    long logAppendTime = RecordBatch.NO_TIMESTAMP;
    if (timestampType == TimestampType.LOG_APPEND_TIME)
        logAppendTime = System.currentTimeMillis();
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferStream, magic, compressionType, timestampType, initialOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, false, partitionLeaderEpoch, sizeEstimate);
    for (SimpleRecord record : records) builder.append(record);
    return builder.build();
}
Also used: ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream)
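
A minimal usage sketch of the factory above, building a small transactional v2 batch entirely in memory (the argument values are made up for illustration):

MemoryRecords records = MemoryRecords.withRecords(
    RecordBatch.MAGIC_VALUE_V2,      // magic
    0L,                              // initialOffset
    CompressionType.NONE,
    TimestampType.CREATE_TIME,
    1000L,                           // producerId
    (short) 0,                       // producerEpoch
    0,                               // baseSequence
    0,                               // partitionLeaderEpoch
    true,                            // isTransactional
    new SimpleRecord("key".getBytes(), "value".getBytes()));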

Example 30 with ByteBufferOutputStream

Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project.

From class FetcherTest, method testParseCorruptedRecord.

@Test
public void testParseCorruptedRecord() throws Exception {
    buildFetcher();
    assignFromUser(singleton(tp0));
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
    byte magic = RecordBatch.MAGIC_VALUE_V1;
    byte[] key = "foo".getBytes();
    byte[] value = "baz".getBytes();
    long offset = 0;
    long timestamp = 500L;
    int size = LegacyRecord.recordSize(magic, key.length, value.length);
    byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME);
    long crc = LegacyRecord.computeChecksum(magic, attributes, timestamp, key, value);
    // write one valid record
    out.writeLong(offset);
    out.writeInt(size);
    LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);
    // and one invalid record (note the crc)
    out.writeLong(offset + 1);
    out.writeInt(size);
    LegacyRecord.write(out, magic, crc + 1, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);
    // write one valid record
    out.writeLong(offset + 2);
    out.writeInt(size);
    LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);
    // Write a record whose size field is invalid.
    out.writeLong(offset + 3);
    out.writeInt(1);
    // write one valid record
    out.writeLong(offset + 4);
    out.writeInt(size);
    LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);
    buffer.flip();
    subscriptions.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0)));
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    // the first fetchedRecords() should return the first valid message
    assertEquals(1, fetchedRecords().get(tp0).size());
    assertEquals(1, subscriptions.position(tp0).offset);
    ensureBlockOnRecord(1L);
    seekAndConsumeRecord(buffer, 2L);
    ensureBlockOnRecord(3L);
    try {
        // For a record that cannot be retrieved from the iterator, we cannot seek over it within the batch.
        seekAndConsumeRecord(buffer, 4L);
        fail("Should have thrown exception when fail to retrieve a record from iterator.");
    } catch (KafkaException ke) {
        // let it go
    }
    ensureBlockOnRecord(4L);
}
Also used: ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), DataOutputStream (java.io.DataOutputStream), KafkaException (org.apache.kafka.common.KafkaException), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.jupiter.api.Test)
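
The buffer-wrapping pattern in the test above is worth isolating: a DataOutputStream over a ByteBufferOutputStream writes straight into the supplied ByteBuffer using the legacy framing [offset int64][size int32][record bytes], and after flip() the same buffer is handed to MemoryRecords.readableRecords, which is how the mocked fetch response feeds the corrupted data back to the consumer. A minimal standalone sketch with a single valid record (method name and values are made up for illustration):

@Test
public void legacyRecordFraming() throws IOException {
    ByteBuffer buffer = ByteBuffer.allocate(128);
    DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
    byte magic = RecordBatch.MAGIC_VALUE_V1;
    byte[] key = "foo".getBytes();
    byte[] value = "bar".getBytes();
    byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME);
    long crc = LegacyRecord.computeChecksum(magic, attributes, 500L, key, value);
    // legacy framing: offset, size, then the record bytes themselves
    out.writeLong(0L);
    out.writeInt(LegacyRecord.recordSize(magic, key.length, value.length));
    LegacyRecord.write(out, magic, crc, attributes, 500L, key, value);
    // flip the wrapped buffer, then expose it as MemoryRecords, as the fetch response above does
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    int count = 0;
    for (Record record : records.records()) count++;
    assertEquals(1, count);
}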

Aggregations

ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream): 31
ByteBuffer (java.nio.ByteBuffer): 26
DataOutputStream (java.io.DataOutputStream): 20
Test (org.junit.Test): 13
Test (org.junit.jupiter.api.Test): 10
KafkaException (org.apache.kafka.common.KafkaException): 8
Header (org.apache.kafka.common.header.Header): 6
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 6
IOException (java.io.IOException): 3
OutputStream (java.io.OutputStream): 3
ArrayList (java.util.ArrayList): 2
KafkaLZ4BlockInputStream (org.apache.kafka.common.compress.KafkaLZ4BlockInputStream): 2
KafkaLZ4BlockOutputStream (org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream): 2
BatchRetention (org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetention): 2
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 2
SnapshotFooterRecord (org.apache.kafka.common.message.SnapshotFooterRecord): 1
SnapshotHeaderRecord (org.apache.kafka.common.message.SnapshotHeaderRecord): 1
BatchRetentionResult (org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetentionResult): 1
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 1
ArgumentsSource (org.junit.jupiter.params.provider.ArgumentsSource): 1