Use of org.junit.jupiter.params.provider.EnumSource in project flink by apache.
The class PulsarSinkITCase, method writeRecordsToPulsar.
@ParameterizedTest
@EnumSource(DeliveryGuarantee.class)
void writeRecordsToPulsar(DeliveryGuarantee guarantee) throws Exception {
    // A random topic with 4 partitions.
    String topic = randomAlphabetic(8);
    operator().createTopic(topic, 4);
    int counts = ThreadLocalRandom.current().nextInt(100, 200);
    ControlSource source = new ControlSource(
            sharedObjects, operator(), topic, guarantee, counts, Duration.ofMinutes(5));
    PulsarSink<String> sink = PulsarSink.builder()
            .setServiceUrl(operator().serviceUrl())
            .setAdminUrl(operator().adminUrl())
            .setDeliveryGuarantee(guarantee)
            .setTopics(topic)
            .setSerializationSchema(flinkSchema(new SimpleStringSchema()))
            .build();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(PARALLELISM);
    env.enableCheckpointing(100L);
    env.addSource(source).sinkTo(sink);
    env.execute();

    List<String> expectedRecords = source.getExpectedRecords();
    List<String> consumedRecords = source.getConsumedRecords();
    assertThat(consumedRecords)
            .hasSameSizeAs(expectedRecords)
            .containsExactlyInAnyOrderElementsOf(expectedRecords);
}
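@EnumSource runs the test once per constant of DeliveryGuarantee, so the sink is exercised under every guarantee without duplicating the test body. When only a subset of constants applies, the annotation's names attribute filters them; here is a minimal, self-contained sketch in which the Level enum and test class are illustrative, not taken from Flink:

import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class EnumSourceSketchTest {

    // Illustrative enum, not part of the Flink code base.
    enum Level { DEBUG, INFO, WARN, ERROR }

    // Runs once per constant: DEBUG, INFO, WARN, ERROR.
    @ParameterizedTest
    @EnumSource(Level.class)
    void acceptsEveryLevel(Level level) {
        assertNotNull(level.name());
    }

    // Runs only for WARN and ERROR; `names` filters by constant name.
    @ParameterizedTest
    @EnumSource(value = Level.class, names = {"WARN", "ERROR"})
    void acceptsSevereLevelsOnly(Level level) {
        assertNotNull(level);
    }
}

By default names uses Mode.INCLUDE, i.e. only the listed constants run.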
Use of org.junit.jupiter.params.provider.EnumSource in project kafka by apache.
The class NodeApiVersionsTest, method testUsableVersionLatestVersions.
@ParameterizedTest
@EnumSource(ApiMessageType.ListenerType.class)
public void testUsableVersionLatestVersions(ApiMessageType.ListenerType scope) {
    ApiVersionsResponse defaultResponse = ApiVersionsResponse.defaultApiVersionsResponse(scope);
    List<ApiVersion> versionList = new LinkedList<>(defaultResponse.data().apiKeys());
    // Add an API key that we don't know about.
    versionList.add(new ApiVersion()
            .setApiKey((short) 100)
            .setMinVersion((short) 0)
            .setMaxVersion((short) 1));
    NodeApiVersions versions = new NodeApiVersions(versionList);
    for (ApiKeys apiKey : ApiKeys.apisForListener(scope)) {
        assertEquals(apiKey.latestVersion(), versions.latestUsableVersion(apiKey));
    }
}
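One detail worth noting: ListenerType is an enum nested inside ApiMessageType, and @EnumSource accepts the qualified class literal exactly as it would a top-level enum. A minimal illustration with a hypothetical nested enum (Protocol and Role are invented for this sketch):

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class NestedEnumSketchTest {

    // Hypothetical outer type with a nested enum, mirroring ApiMessageType.ListenerType.
    static class Protocol {
        enum Role { CLIENT, SERVER }
    }

    // The qualified literal works exactly like a top-level enum class.
    @ParameterizedTest
    @EnumSource(Protocol.Role.class)
    void acceptsEveryRole(Protocol.Role role) {
        Assertions.assertNotNull(role);
    }
}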
Use of org.junit.jupiter.params.provider.EnumSource in project kafka by apache.
The class MemoryRecordsBuilderTest, method convertV2ToV1UsingMixedCreateAndLogAppendTime.
@ParameterizedTest
@EnumSource(CompressionType.class)
public void convertV2ToV1UsingMixedCreateAndLogAppendTime(CompressionType compressionType) {
    ByteBuffer buffer = ByteBuffer.allocate(512);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2,
            compressionType, TimestampType.LOG_APPEND_TIME, 0L);
    builder.append(10L, "1".getBytes(), "a".getBytes());
    builder.close();
    int sizeExcludingTxnMarkers = buffer.position();
    MemoryRecords.writeEndTransactionalMarker(buffer, 1L, System.currentTimeMillis(), 0, 15L,
            (short) 0, new EndTransactionMarker(ControlRecordType.ABORT, 0));
    int position = buffer.position();
    builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, compressionType,
            TimestampType.CREATE_TIME, 1L);
    builder.append(12L, "2".getBytes(), "b".getBytes());
    builder.append(13L, "3".getBytes(), "c".getBytes());
    builder.close();
    sizeExcludingTxnMarkers += buffer.position() - position;
    MemoryRecords.writeEndTransactionalMarker(buffer, 14L, System.currentTimeMillis(), 0, 1L,
            (short) 0, new EndTransactionMarker(ControlRecordType.COMMIT, 0));
    buffer.flip();

    Supplier<ConvertedRecords<MemoryRecords>> convertedRecordsSupplier = () ->
            MemoryRecords.readableRecords(buffer).downConvert(MAGIC_VALUE_V1, 0, time);

    if (compressionType != CompressionType.ZSTD) {
        ConvertedRecords<MemoryRecords> convertedRecords = convertedRecordsSupplier.get();
        MemoryRecords records = convertedRecords.records();

        // Transactional markers are skipped when down-converting to V1, so exclude them from the size.
        verifyRecordsProcessingStats(compressionType, convertedRecords.recordConversionStats(),
                3, 3, records.sizeInBytes(), sizeExcludingTxnMarkers);

        List<? extends RecordBatch> batches = Utils.toList(records.batches().iterator());
        if (compressionType != CompressionType.NONE) {
            assertEquals(2, batches.size());
            assertEquals(TimestampType.LOG_APPEND_TIME, batches.get(0).timestampType());
            assertEquals(TimestampType.CREATE_TIME, batches.get(1).timestampType());
        } else {
            assertEquals(3, batches.size());
            assertEquals(TimestampType.LOG_APPEND_TIME, batches.get(0).timestampType());
            assertEquals(TimestampType.CREATE_TIME, batches.get(1).timestampType());
            assertEquals(TimestampType.CREATE_TIME, batches.get(2).timestampType());
        }

        List<Record> logRecords = Utils.toList(records.records().iterator());
        assertEquals(3, logRecords.size());
        assertEquals(ByteBuffer.wrap("1".getBytes()), logRecords.get(0).key());
        assertEquals(ByteBuffer.wrap("2".getBytes()), logRecords.get(1).key());
        assertEquals(ByteBuffer.wrap("3".getBytes()), logRecords.get(2).key());
    } else {
        Exception e = assertThrows(UnsupportedCompressionTypeException.class, convertedRecordsSupplier::get);
        assertEquals("Down-conversion of zstandard-compressed batches is not supported", e.getMessage());
    }
}
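Here the ZSTD constant is handled with an in-test branch because down-conversion is expected to fail for it. When an unsupported constant should simply be skipped rather than asserted on, @EnumSource can exclude it at the source instead; a sketch of that alternative, assuming CompressionType from org.apache.kafka.common.record is on the test classpath (the test name and body are illustrative):

import org.apache.kafka.common.record.CompressionType;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class DownConvertSketchTest {

    // Runs for every CompressionType constant except ZSTD; the string must match the constant name.
    @ParameterizedTest
    @EnumSource(value = CompressionType.class, names = "ZSTD", mode = EnumSource.Mode.EXCLUDE)
    void convertsOnlySupportedCodecs(CompressionType compressionType) {
        // Down-conversion assertions for the supported codecs would go here.
    }
}

The branch in the original test is the better fit when the excluded constant still needs its own assertion, as with the UnsupportedCompressionTypeException check above.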
Use of org.junit.jupiter.params.provider.EnumSource in project kafka by apache.
The class ApiVersionsResponseTest, method shouldHaveCorrectDefaultApiVersionsResponse.
@ParameterizedTest
@EnumSource(ApiMessageType.ListenerType.class)
public void shouldHaveCorrectDefaultApiVersionsResponse(ApiMessageType.ListenerType scope) {
    ApiVersionsResponse defaultResponse = ApiVersionsResponse.defaultApiVersionsResponse(scope);
    assertEquals(ApiKeys.apisForListener(scope).size(), defaultResponse.data().apiKeys().size(),
            "API versions for all API keys must be maintained.");
    for (ApiKeys key : ApiKeys.apisForListener(scope)) {
        ApiVersion version = defaultResponse.apiVersion(key.id);
        assertNotNull(version, "Could not find ApiVersion for API " + key.name);
        assertEquals(version.minVersion(), key.oldestVersion(), "Incorrect min version for Api " + key.name);
        assertEquals(version.maxVersion(), key.latestVersion(), "Incorrect max version for Api " + key.name);

        // Check that versions less than the min version are indeed set as null, i.e., deprecated.
        for (int i = 0; i < version.minVersion(); ++i) {
            assertNull(key.messageType.requestSchemas()[i],
                    "Request version " + i + " for API " + version.apiKey() + " must be null");
            assertNull(key.messageType.responseSchemas()[i],
                    "Response version " + i + " for API " + version.apiKey() + " must be null");
        }

        // Check that versions between the min and max versions are non-null, i.e., valid.
        for (int i = version.minVersion(); i <= version.maxVersion(); ++i) {
            assertNotNull(key.messageType.requestSchemas()[i],
                    "Request version " + i + " for API " + version.apiKey() + " must not be null");
            assertNotNull(key.messageType.responseSchemas()[i],
                    "Response version " + i + " for API " + version.apiKey() + " must not be null");
        }
    }

    assertTrue(defaultResponse.data().supportedFeatures().isEmpty());
    assertTrue(defaultResponse.data().finalizedFeatures().isEmpty());
    assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH,
            defaultResponse.data().finalizedFeaturesEpoch());
}
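As an aside, since JUnit Jupiter 5.7 the class literal can be omitted altogether and the enum type is deduced from the method parameter, which would shorten declarations like the ones above; a minimal illustrative sketch (Color is an invented enum):

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class ImplicitEnumSourceSketchTest {

    // Invented enum for illustration.
    enum Color { RED, GREEN, BLUE }

    // No class literal needed: the enum type is inferred from the parameter (JUnit Jupiter 5.7+).
    @ParameterizedTest
    @EnumSource
    void acceptsEveryColor(Color color) {
        Assertions.assertNotNull(color);
    }
}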
Use of org.junit.jupiter.params.provider.EnumSource in project kafka by apache.
The class BatchBuilderTest, method testBuildBatch.
@ParameterizedTest
@EnumSource(CompressionType.class)
void testBuildBatch(CompressionType compressionType) {
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long baseOffset = 57;
    long logAppendTime = time.milliseconds();
    boolean isControlBatch = false;
    int leaderEpoch = 15;

    BatchBuilder<String> builder = new BatchBuilder<>(buffer, serde, compressionType, baseOffset,
            logAppendTime, isControlBatch, leaderEpoch, buffer.limit());

    List<String> records = Arrays.asList("a", "ap", "app", "appl", "apple");
    records.forEach(record -> builder.appendRecord(record, null));
    MemoryRecords builtRecordSet = builder.build();

    // Once built, the builder still reports space requirements but rejects further appends.
    assertTrue(builder.bytesNeeded(Arrays.asList("a"), null).isPresent());
    assertThrows(IllegalStateException.class, () -> builder.appendRecord("a", null));

    List<MutableRecordBatch> builtBatches = Utils.toList(builtRecordSet.batchIterator());
    assertEquals(1, builtBatches.size());
    assertEquals(records, builder.records());

    MutableRecordBatch batch = builtBatches.get(0);
    assertEquals(5, batch.countOrNull());
    assertEquals(compressionType, batch.compressionType());
    assertEquals(baseOffset, batch.baseOffset());
    assertEquals(logAppendTime, batch.maxTimestamp());
    assertEquals(isControlBatch, batch.isControlBatch());
    assertEquals(leaderEpoch, batch.partitionLeaderEpoch());

    List<String> builtRecords = Utils.toList(batch).stream()
            .map(record -> Utils.utf8(record.value()))
            .collect(Collectors.toList());
    assertEquals(records, builtRecords);
}
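All five examples follow the same shape: annotate with @ParameterizedTest, point @EnumSource at an enum class, and accept one constant per invocation. For completeness, @EnumSource can also select constants by regular expression through its MATCH_ALL and MATCH_ANY modes; the sketch below mirrors the example in the JUnit 5 user guide, using java.time.temporal.ChronoUnit:

import java.time.temporal.ChronoUnit;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class MatchAllSketchTest {

    // MATCH_ALL treats each entry in `names` as a regex; "^.*DAYS$" selects DAYS and HALF_DAYS.
    @ParameterizedTest
    @EnumSource(value = ChronoUnit.class, names = "^.*DAYS$", mode = EnumSource.Mode.MATCH_ALL)
    void acceptsDayBasedUnits(ChronoUnit unit) {
        Assertions.assertTrue(unit.name().endsWith("DAYS"));
    }
}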