Search in sources:

Example 51 with EnumSource

Use of org.junit.jupiter.params.provider.EnumSource in project flink by apache.

The following snippet is from the class PulsarSinkITCase, method writeRecordsToPulsar.

// Parameterized over every DeliveryGuarantee so the sink is exercised under
// each delivery semantic the connector supports.
@ParameterizedTest
@EnumSource(DeliveryGuarantee.class)
void writeRecordsToPulsar(DeliveryGuarantee guarantee) throws Exception {
    // A random topic with 4 partitions. (The original comment claimed
    // "partition 1", which contradicted the createTopic(topic, 4) call below.)
    String topic = randomAlphabetic(8);
    operator().createTopic(topic, 4);
    // Random record count in [100, 200) so each run varies the workload size.
    int counts = ThreadLocalRandom.current().nextInt(100, 200);
    // NOTE(review): ControlSource appears to both generate the expected records
    // and track what was consumed (see the two getters used below) — confirm
    // against its implementation; the 5-minute Duration is presumably a timeout.
    ControlSource source = new ControlSource(sharedObjects, operator(), topic, guarantee, counts, Duration.ofMinutes(5));
    // Build the sink under test with the parameterized delivery guarantee.
    PulsarSink<String> sink = PulsarSink.builder().setServiceUrl(operator().serviceUrl()).setAdminUrl(operator().adminUrl()).setDeliveryGuarantee(guarantee).setTopics(topic).setSerializationSchema(flinkSchema(new SimpleStringSchema())).build();
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(PARALLELISM);
    // Checkpoint every 100 ms — presumably needed so transactional (EXACTLY_ONCE)
    // writes commit frequently during the test; confirm against sink docs.
    env.enableCheckpointing(100L);
    env.addSource(source).sinkTo(sink);
    env.execute();
    // Every produced record must come back, ignoring order (with 4 partitions
    // a global ordering guarantee would not hold).
    List<String> expectedRecords = source.getExpectedRecords();
    List<String> consumedRecords = source.getConsumedRecords();
    assertThat(consumedRecords).hasSameSizeAs(expectedRecords).containsExactlyInAnyOrderElementsOf(expectedRecords);
}
Also used : ControlSource(org.apache.flink.connector.pulsar.testutils.function.ControlSource) SimpleStringSchema(org.apache.flink.api.common.serialization.SimpleStringSchema) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) EnumSource(org.junit.jupiter.params.provider.EnumSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 52 with EnumSource

Use of org.junit.jupiter.params.provider.EnumSource in project kafka by apache.

The following snippet is from the class NodeApiVersionsTest, method testUsableVersionLatestVersions.

@ParameterizedTest
@EnumSource(ApiMessageType.ListenerType.class)
public void testUsableVersionLatestVersions(ApiMessageType.ListenerType scope) {
    // Start from the broker's default advertised versions for this listener type.
    ApiVersionsResponse response = ApiVersionsResponse.defaultApiVersionsResponse(scope);
    List<ApiVersion> advertised = new LinkedList<>(response.data().apiKeys());
    // Inject a fabricated API key (100) that the client does not know about;
    // it must not interfere with version resolution for the known APIs.
    ApiVersion unknownApi = new ApiVersion().setApiKey((short) 100).setMinVersion((short) 0).setMaxVersion((short) 1);
    advertised.add(unknownApi);
    NodeApiVersions nodeVersions = new NodeApiVersions(advertised);
    // For every API exposed on this listener, the usable version is the latest one.
    for (ApiKeys api : ApiKeys.apisForListener(scope)) {
        assertEquals(api.latestVersion(), nodeVersions.latestUsableVersion(api));
    }
}
Also used : ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) ApiVersion(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) LinkedList(java.util.LinkedList) EnumSource(org.junit.jupiter.params.provider.EnumSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 53 with EnumSource

Use of org.junit.jupiter.params.provider.EnumSource in project kafka by apache.

The following snippet is from the class MemoryRecordsBuilderTest, method convertV2ToV1UsingMixedCreateAndLogAppendTime.

// Writes a v2 log that interleaves data batches (with differing timestamp types)
// and transactional control markers, down-converts it to v1, and verifies the
// markers are dropped while the data batches and records survive.
@ParameterizedTest
@EnumSource(CompressionType.class)
public void convertV2ToV1UsingMixedCreateAndLogAppendTime(CompressionType compressionType) {
    ByteBuffer buffer = ByteBuffer.allocate(512);
    // Batch 1: magic v2, LOG_APPEND_TIME, base offset 0, a single record.
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, compressionType, TimestampType.LOG_APPEND_TIME, 0L);
    builder.append(10L, "1".getBytes(), "a".getBytes());
    builder.close();
    // Running tally of data-batch bytes only; marker bytes are excluded below.
    int sizeExcludingTxnMarkers = buffer.position();
    // First transactional control marker (ABORT), written between the two data batches.
    MemoryRecords.writeEndTransactionalMarker(buffer, 1L, System.currentTimeMillis(), 0, 15L, (short) 0, new EndTransactionMarker(ControlRecordType.ABORT, 0));
    // Remember where the marker ended so its bytes can be skipped in the tally.
    int position = buffer.position();
    // Batch 2: magic v2, CREATE_TIME, base offset 1, two records.
    builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, compressionType, TimestampType.CREATE_TIME, 1L);
    builder.append(12L, "2".getBytes(), "b".getBytes());
    builder.append(13L, "3".getBytes(), "c".getBytes());
    builder.close();
    // Add only the second data batch's bytes (everything after the ABORT marker).
    sizeExcludingTxnMarkers += buffer.position() - position;
    // Second transactional control marker (COMMIT), appended at the end of the log.
    MemoryRecords.writeEndTransactionalMarker(buffer, 14L, System.currentTimeMillis(), 0, 1L, (short) 0, new EndTransactionMarker(ControlRecordType.COMMIT, 0));
    buffer.flip();
    // Deferred so the ZSTD branch can assert that the conversion itself throws.
    Supplier<ConvertedRecords<MemoryRecords>> convertedRecordsSupplier = () -> MemoryRecords.readableRecords(buffer).downConvert(MAGIC_VALUE_V1, 0, time);
    if (compressionType != CompressionType.ZSTD) {
        ConvertedRecords<MemoryRecords> convertedRecords = convertedRecordsSupplier.get();
        MemoryRecords records = convertedRecords.records();
        // Transactional markers are skipped when down converting to V1, so exclude them from size
        verifyRecordsProcessingStats(compressionType, convertedRecords.recordConversionStats(), 3, 3, records.sizeInBytes(), sizeExcludingTxnMarkers);
        List<? extends RecordBatch> batches = Utils.toList(records.batches().iterator());
        if (compressionType != CompressionType.NONE) {
            // Compressed: one converted batch per original data batch, marker batches gone.
            assertEquals(2, batches.size());
            assertEquals(TimestampType.LOG_APPEND_TIME, batches.get(0).timestampType());
            assertEquals(TimestampType.CREATE_TIME, batches.get(1).timestampType());
        } else {
            // Uncompressed: three batches expected — presumably one per record after
            // down-conversion; confirm against MemoryRecords down-convert semantics.
            assertEquals(3, batches.size());
            assertEquals(TimestampType.LOG_APPEND_TIME, batches.get(0).timestampType());
            assertEquals(TimestampType.CREATE_TIME, batches.get(1).timestampType());
            assertEquals(TimestampType.CREATE_TIME, batches.get(2).timestampType());
        }
        // All three data records survive the conversion, in order, keyed "1".."3".
        List<Record> logRecords = Utils.toList(records.records().iterator());
        assertEquals(3, logRecords.size());
        assertEquals(ByteBuffer.wrap("1".getBytes()), logRecords.get(0).key());
        assertEquals(ByteBuffer.wrap("2".getBytes()), logRecords.get(1).key());
        assertEquals(ByteBuffer.wrap("3".getBytes()), logRecords.get(2).key());
    } else {
        // ZSTD batches cannot be down-converted to v1; the attempt must fail loudly.
        Exception e = assertThrows(UnsupportedCompressionTypeException.class, convertedRecordsSupplier::get);
        assertEquals("Down-conversion of zstandard-compressed batches is not supported", e.getMessage());
    }
}
Also used : ByteBuffer(java.nio.ByteBuffer) UnsupportedCompressionTypeException(org.apache.kafka.common.errors.UnsupportedCompressionTypeException) EnumSource(org.junit.jupiter.params.provider.EnumSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 54 with EnumSource

Use of org.junit.jupiter.params.provider.EnumSource in project kafka by apache.

The following snippet is from the class ApiVersionsResponseTest, method shouldHaveCorrectDefaultApiVersionsResponse.

@ParameterizedTest
@EnumSource(ApiMessageType.ListenerType.class)
public void shouldHaveCorrectDefaultApiVersionsResponse(ApiMessageType.ListenerType scope) {
    // The default response must advertise exactly the APIs exposed on this listener.
    ApiVersionsResponse response = ApiVersionsResponse.defaultApiVersionsResponse(scope);
    assertEquals(ApiKeys.apisForListener(scope).size(), response.data().apiKeys().size(), "API versions for all API keys must be maintained.");
    for (ApiKeys apiKey : ApiKeys.apisForListener(scope)) {
        ApiVersion advertised = response.apiVersion(apiKey.id);
        assertNotNull(advertised, "Could not find ApiVersion for API " + apiKey.name);
        // The advertised range must match the key's oldest and latest supported versions.
        assertEquals(advertised.minVersion(), apiKey.oldestVersion(), "Incorrect min version for Api " + apiKey.name);
        assertEquals(advertised.maxVersion(), apiKey.latestVersion(), "Incorrect max version for Api " + apiKey.name);
        // Versions below the advertised minimum are deprecated: their schemas are null.
        for (int v = 0; v < advertised.minVersion(); v++) {
            assertNull(apiKey.messageType.requestSchemas()[v], "Request version " + v + " for API " + advertised.apiKey() + " must be null");
            assertNull(apiKey.messageType.responseSchemas()[v], "Response version " + v + " for API " + advertised.apiKey() + " must be null");
        }
        // Every version in the advertised [min, max] range must have request/response schemas.
        for (int v = advertised.minVersion(); v <= advertised.maxVersion(); v++) {
            assertNotNull(apiKey.messageType.requestSchemas()[v], "Request version " + v + " for API " + advertised.apiKey() + " must not be null");
            assertNotNull(apiKey.messageType.responseSchemas()[v], "Response version " + v + " for API " + advertised.apiKey() + " must not be null");
        }
    }
    // A freshly built default response carries no feature metadata at all.
    assertTrue(response.data().supportedFeatures().isEmpty());
    assertTrue(response.data().finalizedFeatures().isEmpty());
    assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data().finalizedFeaturesEpoch());
}
Also used : ApiKeys(org.apache.kafka.common.protocol.ApiKeys) ApiVersion(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion) EnumSource(org.junit.jupiter.params.provider.EnumSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 55 with EnumSource

Use of org.junit.jupiter.params.provider.EnumSource in project kafka by apache.

The following snippet is from the class BatchBuilderTest, method testBuildBatch.

@ParameterizedTest
@EnumSource(CompressionType.class)
void testBuildBatch(CompressionType compressionType) {
    // Header fields used both for building the batch and for verification below.
    ByteBuffer backingBuffer = ByteBuffer.allocate(1024);
    long baseOffset = 57;
    long logAppendTime = time.milliseconds();
    boolean isControlBatch = false;
    int leaderEpoch = 15;
    BatchBuilder<String> batchBuilder = new BatchBuilder<>(backingBuffer, serde, compressionType, baseOffset, logAppendTime, isControlBatch, leaderEpoch, backingBuffer.limit());
    List<String> appended = Arrays.asList("a", "ap", "app", "appl", "apple");
    for (String record : appended) {
        batchBuilder.appendRecord(record, null);
    }
    MemoryRecords recordSet = batchBuilder.build();
    // After build(), the builder still answers sizing queries but rejects appends.
    assertTrue(batchBuilder.bytesNeeded(Arrays.asList("a"), null).isPresent());
    assertThrows(IllegalStateException.class, () -> batchBuilder.appendRecord("a", null));
    List<MutableRecordBatch> batches = Utils.toList(recordSet.batchIterator());
    assertEquals(1, batches.size());
    assertEquals(appended, batchBuilder.records());
    // Every header field of the single produced batch must match what was configured.
    MutableRecordBatch onlyBatch = batches.get(0);
    assertEquals(5, onlyBatch.countOrNull());
    assertEquals(compressionType, onlyBatch.compressionType());
    assertEquals(baseOffset, onlyBatch.baseOffset());
    assertEquals(logAppendTime, onlyBatch.maxTimestamp());
    assertEquals(isControlBatch, onlyBatch.isControlBatch());
    assertEquals(leaderEpoch, onlyBatch.partitionLeaderEpoch());
    // Round-trip: decoding the batch's record values must reproduce the inputs.
    List<String> decoded = Utils.toList(onlyBatch).stream().map(r -> Utils.utf8(r.value())).collect(Collectors.toList());
    assertEquals(appended, decoded);
}
Also used : Utils(org.apache.kafka.common.utils.Utils) ValueSource(org.junit.jupiter.params.provider.ValueSource) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) CompressionType(org.apache.kafka.common.record.CompressionType) EnumSource(org.junit.jupiter.params.provider.EnumSource) Collectors(java.util.stream.Collectors) ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) List(java.util.List) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) EnumSource(org.junit.jupiter.params.provider.EnumSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Aggregations

EnumSource (org.junit.jupiter.params.provider.EnumSource)398 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)394 Account (io.nem.symbol.sdk.model.account.Account)40 TransferTransaction (io.nem.symbol.sdk.model.transaction.TransferTransaction)38 SignedTransaction (io.nem.symbol.sdk.model.transaction.SignedTransaction)34 Transaction (io.nem.symbol.sdk.model.transaction.Transaction)34 Address (io.nem.symbol.sdk.model.account.Address)30 AggregateTransaction (io.nem.symbol.sdk.model.transaction.AggregateTransaction)27 TransactionRepository (io.nem.symbol.sdk.api.TransactionRepository)26 TransactionSearchCriteria (io.nem.symbol.sdk.api.TransactionSearchCriteria)26 BigInteger (java.math.BigInteger)25 MosaicId (io.nem.symbol.sdk.model.mosaic.MosaicId)23 KernelTransaction (org.neo4j.kernel.api.KernelTransaction)23 EntityUpdates (org.neo4j.storageengine.api.EntityUpdates)21 RepositoryFactory (io.nem.symbol.sdk.api.RepositoryFactory)19 Path (java.nio.file.Path)18 ArrayList (java.util.ArrayList)16 Listener (io.nem.symbol.sdk.api.Listener)15 PlainMessage (io.nem.symbol.sdk.model.message.PlainMessage)15 NamespaceId (io.nem.symbol.sdk.model.namespace.NamespaceId)14