Example 36 with ValueSource

Use of org.junit.jupiter.params.provider.ValueSource in project kafka by apache.

From the class KafkaProducerTest, method testMetadataExpiry.

@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testMetadataExpiry(boolean isIdempotenceEnabled) throws InterruptedException {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, isIdempotenceEnabled);
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    when(metadata.fetch()).thenReturn(onePartitionCluster, emptyCluster, onePartitionCluster);
    KafkaProducer<String, String> producer = producerWithOverrideNewSender(configs, metadata);
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value");
    producer.send(record);
    // Verify the topic's metadata isn't requested since it's already present.
    verify(metadata, times(0)).requestUpdateForTopic(topic);
    verify(metadata, times(0)).awaitUpdate(anyInt(), anyLong());
    verify(metadata, times(1)).fetch();
    // The metadata has now expired; verify the producer requests the topic's metadata.
    producer.send(record, null);
    verify(metadata, times(1)).requestUpdateForTopic(topic);
    verify(metadata, times(1)).awaitUpdate(anyInt(), anyLong());
    verify(metadata, times(3)).fetch();
    producer.close(Duration.ofMillis(0));
}
Also used: ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata), HashMap (java.util.HashMap), ValueSource (org.junit.jupiter.params.provider.ValueSource), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
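
The when(metadata.fetch()).thenReturn(a, b, c) call above is Mockito's consecutive stubbing: each successive call returns the next listed value, and the last one repeats from then on. A minimal, self-contained sketch of the same pattern, assuming only Mockito and JUnit 5 on the classpath (the Cache interface and its values are hypothetical, not from the Kafka test):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.*;

import org.junit.jupiter.api.Test;

class ConsecutiveStubbingSketch {

    // Hypothetical collaborator standing in for ProducerMetadata.
    interface Cache {
        String fetch();
    }

    @Test
    void consecutiveStubsReturnValuesInOrder() {
        Cache cache = mock(Cache.class);
        // First call returns "warm", second "empty", third and later "warm" again.
        when(cache.fetch()).thenReturn("warm", "empty", "warm");
        assertEquals("warm", cache.fetch());
        assertEquals("empty", cache.fetch());
        assertEquals("warm", cache.fetch());
        // verify(mock, times(n)) counts invocations, as in the producer test above.
        verify(cache, times(3)).fetch();
    }
}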

Example 37 with ValueSource

Use of org.junit.jupiter.params.provider.ValueSource in project kafka by apache.

From the class StickyAssignorTest, method testAssignmentWithConflictingPreviousGenerations.

@ParameterizedTest(name = "testAssignmentWithConflictingPreviousGenerations with isAllSubscriptionsEqual: {0}")
@ValueSource(booleans = { true, false })
public void testAssignmentWithConflictingPreviousGenerations(boolean isAllSubscriptionsEqual) {
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 4);
    partitionsPerTopic.put(topic2, 4);
    partitionsPerTopic.put(topic3, 4);
    List<String> allTopics = topics(topic, topic2, topic3);
    List<String> consumer1SubscribedTopics = isAllSubscriptionsEqual ? allTopics : topics(topic);
    List<String> consumer2SubscribedTopics = isAllSubscriptionsEqual ? allTopics : topics(topic, topic2);
    subscriptions.put(consumer1, new Subscription(consumer1SubscribedTopics));
    subscriptions.put(consumer2, new Subscription(consumer2SubscribedTopics));
    subscriptions.put(consumer3, new Subscription(allTopics));
    TopicPartition tp0 = new TopicPartition(topic, 0);
    TopicPartition tp1 = new TopicPartition(topic, 1);
    TopicPartition tp2 = new TopicPartition(topic, 2);
    TopicPartition tp3 = new TopicPartition(topic, 3);
    TopicPartition t2p0 = new TopicPartition(topic2, 0);
    TopicPartition t2p1 = new TopicPartition(topic2, 1);
    TopicPartition t2p2 = new TopicPartition(topic2, 2);
    TopicPartition t2p3 = new TopicPartition(topic2, 3);
    TopicPartition t3p0 = new TopicPartition(topic3, 0);
    TopicPartition t3p1 = new TopicPartition(topic3, 1);
    TopicPartition t3p2 = new TopicPartition(topic3, 2);
    TopicPartition t3p3 = new TopicPartition(topic3, 3);
    List<TopicPartition> c1partitions0 = isAllSubscriptionsEqual ? partitions(tp0, tp1, tp2, t2p2, t2p3, t3p0) : partitions(tp0, tp1, tp2, tp3);
    List<TopicPartition> c2partitions0 = partitions(tp0, tp1, t2p0, t2p1, t2p2, t2p3);
    List<TopicPartition> c3partitions0 = partitions(tp2, tp3, t3p0, t3p1, t3p2, t3p3);
    subscriptions.put(consumer1, buildSubscriptionWithGeneration(consumer1SubscribedTopics, c1partitions0, 1));
    subscriptions.put(consumer2, buildSubscriptionWithGeneration(consumer2SubscribedTopics, c2partitions0, 2));
    subscriptions.put(consumer3, buildSubscriptionWithGeneration(allTopics, c3partitions0, 2));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions);
    List<TopicPartition> c1partitions = assignment.get(consumer1);
    List<TopicPartition> c2partitions = assignment.get(consumer2);
    List<TopicPartition> c3partitions = assignment.get(consumer3);
    assertTrue(c1partitions.size() == 4 && c2partitions.size() == 4 && c3partitions.size() == 4);
    assertTrue(c2partitions0.containsAll(c2partitions));
    assertTrue(c3partitions0.containsAll(c3partitions));
    verifyValidityAndBalance(subscriptions, assignment, partitionsPerTopic);
    assertTrue(isFullyBalanced(assignment));
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), Collections.emptyList (java.util.Collections.emptyList), ArrayList (java.util.ArrayList), List (java.util.List), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), ValueSource (org.junit.jupiter.params.provider.ValueSource), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
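
The name attribute on @ParameterizedTest above is a display-name template: {0} expands to the invocation's first argument, so each run of the test shows up under its own descriptive name in reports. A minimal sketch of the mechanism (the test class and method names are made up for illustration):

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

class DisplayNameTemplateSketch {

    // Runs twice; reports show "round-trips flag: true" and "round-trips flag: false".
    @ParameterizedTest(name = "round-trips flag: {0}")
    @ValueSource(booleans = { true, false })
    void roundTripsFlag(boolean flag) {
        assertEquals(flag, Boolean.parseBoolean(String.valueOf(flag)));
    }
}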

Example 38 with ValueSource

Use of org.junit.jupiter.params.provider.ValueSource in project kafka by apache.

From the class KafkaAdminClientTest, method testDescribeProducersTimeout.

@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testDescribeProducersTimeout(boolean timeoutInMetadataLookup) throws Exception {
    MockTime time = new MockTime();
    try (AdminClientUnitTestEnv env = mockClientEnv(time)) {
        TopicPartition topicPartition = new TopicPartition("foo", 0);
        int requestTimeoutMs = 15000;
        if (!timeoutInMetadataLookup) {
            Node leader = env.cluster().nodes().iterator().next();
            expectMetadataRequest(env, topicPartition, leader);
        }
        DescribeProducersOptions options = new DescribeProducersOptions().timeoutMs(requestTimeoutMs);
        DescribeProducersResult result = env.adminClient().describeProducers(singleton(topicPartition), options);
        assertFalse(result.all().isDone());
        time.sleep(requestTimeoutMs);
        TestUtils.waitForCondition(() -> result.all().isDone(), "Future failed to timeout after expiration of timeout");
        assertTrue(result.all().isCompletedExceptionally());
        TestUtils.assertFutureThrows(result.all(), TimeoutException.class);
        assertFalse(env.kafkaClient().hasInFlightRequests());
    }
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), Node (org.apache.kafka.common.Node), MockTime (org.apache.kafka.common.utils.MockTime), ValueSource (org.junit.jupiter.params.provider.ValueSource), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
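
MockTime is Kafka's controllable test clock: time.sleep(requestTimeoutMs) advances it instantly, so the timeout path runs deterministically instead of the test blocking for a real 15 seconds. A rough analogue of that pattern; ManualClock and ExpiringRequest below are hypothetical stand-ins, not Kafka classes:

import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

class ManualClockTimeoutSketch {

    // Hypothetical clock: time only moves when the test says so.
    static class ManualClock {
        private long nowMs = 0;
        long milliseconds() { return nowMs; }
        void sleep(long ms) { nowMs += ms; }
    }

    // Hypothetical request with a deadline measured against that clock.
    static class ExpiringRequest {
        private final ManualClock clock;
        private final long deadlineMs;
        ExpiringRequest(ManualClock clock, long timeoutMs) {
            this.clock = clock;
            this.deadlineMs = clock.milliseconds() + timeoutMs;
        }
        boolean expired() { return clock.milliseconds() >= deadlineMs; }
    }

    @Test
    void timeoutFiresWithoutRealWaiting() {
        ManualClock clock = new ManualClock();
        ExpiringRequest request = new ExpiringRequest(clock, 15_000);
        assertFalse(request.expired());
        // Jump the clock past the deadline; no wall-clock time passes.
        clock.sleep(15_000);
        assertTrue(request.expired());
    }
}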

Example 39 with ValueSource

Use of org.junit.jupiter.params.provider.ValueSource in project flink by apache.

From the class SinkWriterOperatorTest, method testStateRestore.

@ParameterizedTest
@ValueSource(booleans = { true, false })
void testStateRestore(boolean stateful) throws Exception {
    final long initialTime = 0;
    final SnapshottingBufferingSinkWriter snapshottingWriter = new SnapshottingBufferingSinkWriter();
    final OneInputStreamOperatorTestHarness<Integer, CommittableMessage<Integer>> testHarness = createTestHarnessWithBufferingSinkWriter(snapshottingWriter, stateful);
    testHarness.open();
    testHarness.processWatermark(initialTime);
    testHarness.processElement(1, initialTime + 1);
    testHarness.processElement(2, initialTime + 2);
    testHarness.prepareSnapshotPreBarrier(1L);
    OperatorSubtaskState snapshot = testHarness.snapshot(1L, 1L);
    // We see the watermark and the committable summary, so the committables must be stored in state.
    assertThat(testHarness.getOutput()).hasSize(2).contains(new Watermark(initialTime));
    assertThat(snapshottingWriter.lastCheckpointId).isEqualTo(stateful ? 1L : SnapshottingBufferingSinkWriter.NOT_SNAPSHOTTED);
    testHarness.close();
    final OneInputStreamOperatorTestHarness<Integer, CommittableMessage<Integer>> restoredTestHarness = createTestHarnessWithBufferingSinkWriter(new SnapshottingBufferingSinkWriter(), stateful);
    restoredTestHarness.initializeState(snapshot);
    restoredTestHarness.open();
    // this will flush out the committables that were restored
    restoredTestHarness.endInput();
    final long checkpointId = 2;
    restoredTestHarness.prepareSnapshotPreBarrier(checkpointId);
    if (stateful) {
        assertBasicOutput(restoredTestHarness.getOutput(), 2, checkpointId);
    } else {
        assertThat(fromOutput(restoredTestHarness.getOutput()).get(0).asRecord().getValue())
                .isInstanceOf(CommittableSummary.class)
                .satisfies(cs -> SinkV2Assertions.assertThat((CommittableSummary<?>) cs)
                        .hasOverallCommittables(0)
                        .hasPendingCommittables(0)
                        .hasFailedCommittables(0));
    }
    restoredTestHarness.close();
}
Also used: CommittableMessage (org.apache.flink.streaming.api.connector.sink2.CommittableMessage), CommittableSummary (org.apache.flink.streaming.api.connector.sink2.CommittableSummary), Watermark (org.apache.flink.streaming.api.watermark.Watermark), OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState), ValueSource (org.junit.jupiter.params.provider.ValueSource), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
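
The test walks Flink's snapshot/restore cycle: elements buffered in the writer are persisted on snapshot only when the operator is stateful, and a fresh harness initialized from that snapshot flushes them on endInput(). A stripped-down sketch of just the stateful-versus-stateless split, with a hypothetical BufferingWriter in place of the Flink operator and harness:

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.ArrayList;
import java.util.List;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

class SnapshotRestoreSketch {

    // Hypothetical writer: buffers elements, persists them on snapshot only if stateful.
    static class BufferingWriter {
        private final boolean stateful;
        private final List<Integer> buffer = new ArrayList<>();
        BufferingWriter(boolean stateful) { this.stateful = stateful; }
        void write(int element) { buffer.add(element); }
        List<Integer> snapshot() {
            // A stateless writer persists nothing, so its buffer dies with the instance.
            return stateful ? new ArrayList<>(buffer) : new ArrayList<>();
        }
        void restore(List<Integer> state) { buffer.addAll(state); }
        List<Integer> flush() { return new ArrayList<>(buffer); }
    }

    @ParameterizedTest
    @ValueSource(booleans = { true, false })
    void restoredWriterFlushesOnlyPersistedState(boolean stateful) {
        BufferingWriter writer = new BufferingWriter(stateful);
        writer.write(1);
        writer.write(2);
        List<Integer> snapshot = writer.snapshot();
        // Simulate a restart: a new instance restores from the snapshot.
        BufferingWriter restored = new BufferingWriter(stateful);
        restored.restore(snapshot);
        assertEquals(stateful ? 2 : 0, restored.flush().size());
    }
}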

Example 40 with ValueSource

Use of org.junit.jupiter.params.provider.ValueSource in project kafka by apache.

From the class BatchBuilderTest, method testHasRoomForUncompressed.

@ParameterizedTest
@ValueSource(ints = { 128, 157, 256, 433, 512, 777, 1024 })
public void testHasRoomForUncompressed(int batchSize) {
    ByteBuffer buffer = ByteBuffer.allocate(batchSize);
    long baseOffset = 57;
    long logAppendTime = time.milliseconds();
    boolean isControlBatch = false;
    int leaderEpoch = 15;
    BatchBuilder<String> builder = new BatchBuilder<>(buffer, serde, CompressionType.NONE, baseOffset, logAppendTime, isControlBatch, leaderEpoch, buffer.limit());
    String record = "i am a record";
    while (!builder.bytesNeeded(Arrays.asList(record), null).isPresent()) {
        builder.appendRecord(record, null);
    }
    // Approximate size should be exact when compression is not used
    int sizeInBytes = builder.approximateSizeInBytes();
    MemoryRecords records = builder.build();
    assertEquals(sizeInBytes, records.sizeInBytes());
    assertTrue(sizeInBytes <= batchSize, "Built batch size " + sizeInBytes + " is larger than max batch size " + batchSize);
}
Also used: ByteBuffer (java.nio.ByteBuffer), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), ValueSource (org.junit.jupiter.params.provider.ValueSource), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
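
The loop above appends records until bytesNeeded(...) reports there is no room for another one, then checks that the size estimate is exact for uncompressed data and never exceeds the buffer. The same fill-until-full idea in plain Java, without Kafka's BatchBuilder; the sizing here is a deliberate simplification that ignores batch headers and varint record framing:

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

class FillUntilFullSketch {

    @ParameterizedTest
    @ValueSource(ints = { 128, 157, 256, 433, 512, 777, 1024 })
    void recordsNeverOverflowTheBuffer(int batchSize) {
        ByteBuffer buffer = ByteBuffer.allocate(batchSize);
        byte[] record = "i am a record".getBytes(StandardCharsets.UTF_8);
        // Append whole records only while the buffer has room for another one.
        while (buffer.remaining() >= record.length) {
            buffer.put(record);
        }
        int sizeInBytes = buffer.position();
        assertTrue(sizeInBytes <= batchSize,
                "Built batch size " + sizeInBytes + " is larger than max batch size " + batchSize);
    }
}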

Aggregations

ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 266 uses
ValueSource (org.junit.jupiter.params.provider.ValueSource): 266 uses
HashSet (java.util.HashSet): 23 uses
HistogramTestUtils.constructDoubleHistogram (org.HdrHistogram.HistogramTestUtils.constructDoubleHistogram): 23 uses
ArrayList (java.util.ArrayList): 22 uses
HashMap (java.util.HashMap): 20 uses
ApiResponse (org.hisp.dhis.dto.ApiResponse): 15 uses
UpdateModel (com.synopsys.integration.alert.update.model.UpdateModel): 13 uses
File (java.io.File): 13 uses
List (java.util.List): 13 uses
OffsetDateTime (java.time.OffsetDateTime): 10 uses
Map (java.util.Map): 10 uses
TimeUnit (java.util.concurrent.TimeUnit): 10 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 9 uses
ListenerSubscribeMessage (io.nem.symbol.sdk.infrastructure.ListenerSubscribeMessage): 8 uses
UnresolvedAddress (io.nem.symbol.sdk.model.account.UnresolvedAddress): 8 uses
ZooKeeper (org.apache.zookeeper.ZooKeeper): 8 uses
JsonObject (com.google.gson.JsonObject): 7 uses
IOException (java.io.IOException): 7 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 7 uses