Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the project kafka by apache.
The class SimpleRecordTest, method testCompressedIterationWithNullValue.
@Test(expected = InvalidRecordException.class)
public void testCompressedIterationWithNullValue() throws Exception {
    // Build a compressed v1 log entry whose single record has a null key and a null value.
    ByteBuffer buffer = ByteBuffer.allocate(128);
    DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
    LogEntry.writeHeader(out, 0L, Record.RECORD_OVERHEAD_V1);
    Record.write(out, Record.CURRENT_MAGIC_VALUE, 1L, null, null, CompressionType.GZIP, TimestampType.CREATE_TIME);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    // Deep iteration is expected to raise InvalidRecordException before yielding any record;
    // reaching the loop body therefore means the validation failed to trigger.
    java.util.Iterator<? extends Record> deepIterator = records.records().iterator();
    while (deepIterator.hasNext()) {
        deepIterator.next();
        fail("Iteration should have caused invalid record error");
    }
}
Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the project apache-kafka-on-k8s by banzaicloud.
The class CompressionTypeTest, method testLZ4FramingMagicV1.
@Test
public void testLZ4FramingMagicV1() {
    // For message format magic v1, the LZ4 frame descriptor checksum must use the
    // correct (non-broken) framing on both the write path and the read path.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    ByteBufferOutputStream bufferStream = new ByteBufferOutputStream(buffer);
    KafkaLZ4BlockOutputStream output =
        (KafkaLZ4BlockOutputStream) CompressionType.LZ4.wrapForOutput(bufferStream, RecordBatch.MAGIC_VALUE_V1);
    assertFalse(output.useBrokenFlagDescriptorChecksum());
    buffer.rewind();
    KafkaLZ4BlockInputStream input =
        (KafkaLZ4BlockInputStream) CompressionType.LZ4.wrapForInput(buffer, RecordBatch.MAGIC_VALUE_V1, BufferSupplier.create());
    assertFalse(input.ignoreFlagDescriptorChecksum());
}
Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the project apache-kafka-on-k8s by banzaicloud.
The class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooLow.
@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooLow() throws IOException {
    // Serialize a record carrying three headers, then corrupt the serialized header
    // count so that it understates the actual number of headers.
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    final int baseSequence = 723;
    final long baseOffset = 37;
    final int offsetDelta = 10;
    final long baseTimestamp = System.currentTimeMillis();
    final long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // NOTE(review): presumably position 14 holds the header-count varint; the byte 4
    // zigzag-decodes to 2, which is below the actual count of 3 — TODO confirm offset.
    buffer.put(14, (byte) 4);
    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()), logRecord.sizeInBytes());
}
Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the project apache-kafka-on-k8s by banzaicloud.
The class DefaultRecordTest, method testBasicSerde.
@Test
public void testBasicSerde() throws IOException {
    // Cover every key/value nullability combination plus a record with headers and
    // an explicit timestamp; each case must round-trip through write/read unchanged.
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord[] testCases = new SimpleRecord[] {
        new SimpleRecord("hi".getBytes(), "there".getBytes()),
        new SimpleRecord(null, "there".getBytes()),
        new SimpleRecord("hi".getBytes(), null),
        new SimpleRecord(null, null),
        new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers)
    };
    for (SimpleRecord testCase : testCases) {
        int baseSequence = 723;
        long baseOffset = 37;
        int offsetDelta = 10;
        long baseTimestamp = System.currentTimeMillis();
        long timestampDelta = 323;
        ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
        DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, testCase.key(), testCase.value(), testCase.headers());
        ByteBuffer serialized = out.buffer();
        serialized.flip();
        DefaultRecord deserialized = DefaultRecord.readFrom(serialized, baseOffset, baseTimestamp, baseSequence, null);
        assertNotNull(deserialized);
        // Absolute offset/sequence/timestamp are reconstructed from base + delta.
        assertEquals(baseOffset + offsetDelta, deserialized.offset());
        assertEquals(baseSequence + offsetDelta, deserialized.sequence());
        assertEquals(baseTimestamp + timestampDelta, deserialized.timestamp());
        assertEquals(testCase.key(), deserialized.key());
        assertEquals(testCase.value(), deserialized.value());
        assertArrayEquals(testCase.headers(), deserialized.headers());
        assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, testCase.key(), testCase.value(), testCase.headers()), deserialized.sizeInBytes());
    }
}
Use of org.apache.kafka.common.utils.ByteBufferOutputStream in the project apache-kafka-on-k8s by banzaicloud.
The class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooHigh.
@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooHigh() throws IOException {
    // Serialize a record carrying three headers, then corrupt the serialized header
    // count so that it overstates the actual number of headers.
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    final int baseSequence = 723;
    final long baseOffset = 37;
    final int offsetDelta = 10;
    final long baseTimestamp = System.currentTimeMillis();
    final long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // NOTE(review): presumably position 14 holds the header-count varint; the byte 8
    // zigzag-decodes to 4, which exceeds the actual count of 3 — TODO confirm offset.
    buffer.put(14, (byte) 8);
    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()), logRecord.sizeInBytes());
}
Aggregations