Use of org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V0 in project kafka by Apache.
From class MemoryRecordsTest, method testUnsupportedCompress:
@Test
public void testUnsupportedCompress() {
    // ZStandard (ZSTD) compression is only valid for message format v2 and newer;
    // constructing MemoryRecords with an older magic value must fail fast.
    BiFunction<Byte, CompressionType, MemoryRecords> builderBiFunction =
        (magic, compressionType) -> MemoryRecords.withRecords(
            magic, compressionType,
            new SimpleRecord(10L, "key1".getBytes(), "value1".getBytes()));
    Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1).forEach(magic -> {
        Exception e = assertThrows(IllegalArgumentException.class,
            () -> builderBiFunction.apply(magic, CompressionType.ZSTD));
        // assertEquals takes (expected, actual) — expected message goes first so
        // failure output labels the values correctly.
        assertEquals("ZStandard compression is not supported for magic " + magic,
            e.getMessage());
    });
}
Use of org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V0 in project kafka by Apache.
From class MemoryRecordsBuilderTest, method testUnsupportedCompress:
@Test
public void testUnsupportedCompress() {
    // The builder must reject ZStandard compression for pre-v2 message formats
    // at construction time, before any records are appended.
    BiFunction<Byte, CompressionType, MemoryRecordsBuilder> builderBiFunction =
        (magic, compressionType) -> new MemoryRecordsBuilder(
            ByteBuffer.allocate(128), magic, compressionType, TimestampType.CREATE_TIME,
            0L, 0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH,
            RecordBatch.NO_SEQUENCE, false, false,
            RecordBatch.NO_PARTITION_LEADER_EPOCH, 128);
    Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1).forEach(magic -> {
        Exception e = assertThrows(IllegalArgumentException.class,
            () -> builderBiFunction.apply(magic, CompressionType.ZSTD));
        // assertEquals takes (expected, actual) — expected message goes first so
        // failure output labels the values correctly.
        assertEquals("ZStandard compression is not supported for magic " + magic,
            e.getMessage());
    });
}
Aggregations