Example usage of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project, taken from the method testCompressedIterationWithEmptyRecords of the class SimpleLegacyRecordTest.
@Test
public void testCompressedIterationWithEmptyRecords() throws Exception {
    // Produce a GZIP payload that contains no records at all (header/trailer only).
    ByteBuffer emptyGzipPayload = ByteBuffer.allocate(64);
    OutputStream gzipStream = CompressionType.GZIP.wrapForOutput(
            new ByteBufferOutputStream(emptyGzipPayload), RecordBatch.MAGIC_VALUE_V1);
    gzipStream.close();
    emptyGzipPayload.flip();

    // Wrap that empty payload in a legacy (magic v1) wrapper record.
    ByteBuffer batchBuffer = ByteBuffer.allocate(128);
    DataOutputStream batchOut = new DataOutputStream(new ByteBufferOutputStream(batchBuffer));
    AbstractLegacyRecordBatch.writeHeader(batchOut, 0L,
            LegacyRecord.RECORD_OVERHEAD_V1 + emptyGzipPayload.remaining());
    LegacyRecord.write(batchOut, RecordBatch.MAGIC_VALUE_V1, 1L, null,
            Utils.toArray(emptyGzipPayload), CompressionType.GZIP, TimestampType.CREATE_TIME);
    batchBuffer.flip();

    // Iterating a compressed wrapper that holds zero inner records must be rejected.
    MemoryRecords records = MemoryRecords.readableRecords(batchBuffer);
    assertThrows(InvalidRecordException.class, () -> records.records().iterator().hasNext());
}
Example usage of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project, taken from the method testBasicSerde of the class DefaultRecordTest.
@Test
public void testBasicSerde() throws IOException {
    // Headers exercising a null value and an escaped-unicode key.
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };

    // Cover every key/value null combination plus a record carrying headers.
    SimpleRecord[] testCases = new SimpleRecord[] {
        new SimpleRecord("hi".getBytes(), "there".getBytes()),
        new SimpleRecord(null, "there".getBytes()),
        new SimpleRecord("hi".getBytes(), null),
        new SimpleRecord(null, null),
        new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers)
    };

    for (SimpleRecord input : testCases) {
        final int baseSequence = 723;
        final long baseOffset = 37;
        final int offsetDelta = 10;
        final long baseTimestamp = System.currentTimeMillis();
        final long timestampDelta = 323;

        // Serialize the record body into a growable buffer.
        ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
        DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta,
                input.key(), input.value(), input.headers());
        ByteBuffer serialized = out.buffer();
        serialized.flip();

        // Deserialize and verify the round trip field by field.
        DefaultRecord deserialized = DefaultRecord.readFrom(serialized, baseOffset, baseTimestamp, baseSequence, null);
        assertNotNull(deserialized);
        assertEquals(baseOffset + offsetDelta, deserialized.offset());
        assertEquals(baseSequence + offsetDelta, deserialized.sequence());
        assertEquals(baseTimestamp + timestampDelta, deserialized.timestamp());
        assertEquals(input.key(), deserialized.key());
        assertEquals(input.value(), deserialized.value());
        assertArrayEquals(input.headers(), deserialized.headers());
        assertEquals(
                DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, input.key(), input.value(), input.headers()),
                deserialized.sizeInBytes());
    }
}
Example usage of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project, taken from the method testBasicSerdeInvalidHeaderCountTooLow of the class DefaultRecordTest.
@Test
public void testBasicSerdeInvalidHeaderCountTooLow() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord input = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);

    final int baseSequence = 723;
    final long baseOffset = 37;
    final int offsetDelta = 10;
    final long baseTimestamp = System.currentTimeMillis();
    final long timestampDelta = 323;

    // Serialize a valid record first.
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta,
            input.key(), input.value(), input.headers());
    ByteBuffer serialized = out.buffer();
    serialized.flip();

    // Corrupt byte 14 — per the test's intent, presumably where the header count
    // varint lives — so the declared count no longer matches the data.
    serialized.put(14, (byte) 4);

    assertThrows(InvalidRecordException.class,
        () -> DefaultRecord.readFrom(serialized, baseOffset, baseTimestamp, baseSequence, null));
}
Example usage of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project, taken from the method testLZ4FramingMagicV1 of the class CompressionTypeTest.
@Test
public void testLZ4FramingMagicV1() throws IOException {
    // For magic v1 the non-broken (spec-compliant) LZ4 frame descriptor checksum
    // must be used on both the write and read paths.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    KafkaLZ4BlockOutputStream out = (KafkaLZ4BlockOutputStream) CompressionType.LZ4.wrapForOutput(
            new ByteBufferOutputStream(buffer), RecordBatch.MAGIC_VALUE_V1);
    assertFalse(out.useBrokenFlagDescriptorChecksum());
    // Finish the frame and release the stream (original leaked it unclosed).
    out.close();

    buffer.rewind();
    try (KafkaLZ4BlockInputStream in = (KafkaLZ4BlockInputStream) CompressionType.LZ4.wrapForInput(
            buffer, RecordBatch.MAGIC_VALUE_V1, BufferSupplier.create())) {
        assertFalse(in.ignoreFlagDescriptorChecksum());
    }
}
Example usage of org.apache.kafka.common.utils.ByteBufferOutputStream in the Apache Kafka project, taken from the method testLZ4FramingMagicV0 of the class CompressionTypeTest.
@Test
public void testLZ4FramingMagicV0() throws IOException {
    // For magic v0 the historical (broken) LZ4 frame descriptor checksum must be
    // kept for compatibility on both the write and read paths.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    KafkaLZ4BlockOutputStream out = (KafkaLZ4BlockOutputStream) CompressionType.LZ4.wrapForOutput(
            new ByteBufferOutputStream(buffer), RecordBatch.MAGIC_VALUE_V0);
    assertTrue(out.useBrokenFlagDescriptorChecksum());
    // Finish the frame and release the stream (original leaked it unclosed).
    out.close();

    buffer.rewind();
    try (KafkaLZ4BlockInputStream in = (KafkaLZ4BlockInputStream) CompressionType.LZ4.wrapForInput(
            buffer, RecordBatch.MAGIC_VALUE_V0, BufferSupplier.NO_CACHING)) {
        assertTrue(in.ignoreFlagDescriptorChecksum());
    }
}
Aggregations