
Example 86 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class RocksDBStoreTest, method shouldNotThrowExceptionOnRestoreWhenThereIsPreExistingRocksDbFiles.

@Test
public void shouldNotThrowExceptionOnRestoreWhenThereIsPreExistingRocksDbFiles() {
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);
    rocksDBStore.put(new Bytes("existingKey".getBytes(UTF_8)), "existingValue".getBytes(UTF_8));
    rocksDBStore.flush();
    final List<KeyValue<byte[], byte[]>> restoreBytes = new ArrayList<>();
    final byte[] restoredKey = "restoredKey".getBytes(UTF_8);
    final byte[] restoredValue = "restoredValue".getBytes(UTF_8);
    restoreBytes.add(KeyValue.pair(restoredKey, restoredValue));
    // replay a changelog record on top of the pre-existing RocksDB files
    context.restore(DB_NAME, restoreBytes);
    assertThat(stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "restoredKey")))), equalTo("restoredValue"));
}
Also used : Bytes(org.apache.kafka.common.utils.Bytes) KeyValue(org.apache.kafka.streams.KeyValue) ArrayList(java.util.ArrayList) Test(org.junit.Test)
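
The snippet relies on fixtures defined elsewhere in RocksDBStoreTest (rocksDBStore, context, stringSerializer, stringDeserializer, DB_NAME) that are not part of this excerpt. As a standalone illustration of the underlying pattern, here is a minimal sketch, assuming UTF-8 encoded String keys and values, of how changelog restore records are assembled as KeyValue pairs of raw bytes before being replayed into a store:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.streams.KeyValue;

// Minimal sketch (not part of the Kafka test itself): restore records are plain
// key/value byte arrays, so they can be built up as KeyValue pairs and then handed
// to whatever restore mechanism the store under test exposes.
public class RestoreRecordSketch {
    public static void main(final String[] args) {
        final List<KeyValue<byte[], byte[]>> restoreRecords = new ArrayList<>();
        restoreRecords.add(KeyValue.pair(
                "restoredKey".getBytes(StandardCharsets.UTF_8),
                "restoredValue".getBytes(StandardCharsets.UTF_8)));
        // In the test above, context.restore(DB_NAME, restoreRecords) replays these
        // pairs into the store as if they had been read from the changelog topic.
        System.out.println(restoreRecords.size() + " restore record(s) prepared");
    }
}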

Example 87 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class RocksDBStoreTest, method shouldReturnValueOnRange.

@Test
public void shouldReturnValueOnRange() {
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);
    final KeyValue<String, String> kv0 = new KeyValue<>("0", "zero");
    final KeyValue<String, String> kv1 = new KeyValue<>("1", "one");
    final KeyValue<String, String> kv2 = new KeyValue<>("2", "two");
    rocksDBStore.put(new Bytes(kv0.key.getBytes(UTF_8)), kv0.value.getBytes(UTF_8));
    rocksDBStore.put(new Bytes(kv1.key.getBytes(UTF_8)), kv1.value.getBytes(UTF_8));
    rocksDBStore.put(new Bytes(kv2.key.getBytes(UTF_8)), kv2.value.getBytes(UTF_8));
    final LinkedList<KeyValue<String, String>> expectedContents = new LinkedList<>();
    expectedContents.add(kv0);
    expectedContents.add(kv1);
    // a null lower bound means the range starts at the first key in the store
    try (final KeyValueIterator<Bytes, byte[]> iterator = rocksDBStore.range(null, new Bytes(stringSerializer.serialize(null, "1")))) {
        assertEquals(expectedContents, getDeserializedList(iterator));
    }
}
Also used : Bytes(org.apache.kafka.common.utils.Bytes) KeyValue(org.apache.kafka.streams.KeyValue) LinkedList(java.util.LinkedList) Test(org.junit.Test)
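
getDeserializedList(...) is a private helper of RocksDBStoreTest and is not included in this excerpt. A hypothetical stand-in with the same shape, assuming the standard StringDeserializer, could look like the following sketch (not the project's actual helper):

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.KeyValueIterator;

final class DeserializedListSketch {

    // Drain the iterator and turn each raw entry back into a String/String KeyValue,
    // so the result can be compared against the expected list with assertEquals.
    static List<KeyValue<String, String>> getDeserializedList(final KeyValueIterator<Bytes, byte[]> iterator) {
        final StringDeserializer deserializer = new StringDeserializer();
        final List<KeyValue<String, String>> result = new ArrayList<>();
        while (iterator.hasNext()) {
            final KeyValue<Bytes, byte[]> entry = iterator.next();
            result.add(KeyValue.pair(
                    deserializer.deserialize(null, entry.key.get()),  // key bytes -> String
                    deserializer.deserialize(null, entry.value)));    // value bytes -> String
        }
        return result;
    }
}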

Example 88 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class PageViewTypedDemo, method main.

public static void main(final String[] args) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pageview-typed");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, JsonTimestampExtractor.class);
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, JSONSerde.class);
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, JSONSerde.class);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L);
    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, PageView> views = builder.stream("streams-pageview-input", Consumed.with(Serdes.String(), new JSONSerde<>()));
    final KTable<String, UserProfile> users = builder.table("streams-userprofile-input", Consumed.with(Serdes.String(), new JSONSerde<>()));
    final Duration duration24Hours = Duration.ofHours(24);
    final KStream<WindowedPageViewByRegion, RegionCount> regionCount = views.leftJoin(users, (view, profile) -> {
        final PageViewByRegion viewByRegion = new PageViewByRegion();
        viewByRegion.user = view.user;
        viewByRegion.page = view.page;
        if (profile != null) {
            viewByRegion.region = profile.region;
        } else {
            viewByRegion.region = "UNKNOWN";
        }
        return viewByRegion;
    }).map((user, viewRegion) -> new KeyValue<>(viewRegion.region, viewRegion))
        .groupByKey(Grouped.with(Serdes.String(), new JSONSerde<>()))
        .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofDays(7), duration24Hours).advanceBy(Duration.ofSeconds(1)))
        .count()
        .toStream()
        .map((key, value) -> {
        final WindowedPageViewByRegion wViewByRegion = new WindowedPageViewByRegion();
        wViewByRegion.windowStart = key.window().start();
        wViewByRegion.region = key.key();
        final RegionCount rCount = new RegionCount();
        rCount.region = key.key();
        rCount.count = value;
        return new KeyValue<>(wViewByRegion, rCount);
    });
    // write to the result topic
    regionCount.to("streams-pageviewstats-typed-output", Produced.with(new JSONSerde<>(), new JSONSerde<>()));
    final KafkaStreams streams = new KafkaStreams(builder.build(), props);
    final CountDownLatch latch = new CountDownLatch(1);
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread("streams-pipe-shutdown-hook") {

        @Override
        public void run() {
            streams.close();
            latch.countDown();
        }
    });
    try {
        streams.start();
        latch.await();
    } catch (final Throwable e) {
        e.printStackTrace();
        System.exit(1);
    }
    System.exit(0);
}
Also used : StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StreamsConfig(org.apache.kafka.streams.StreamsConfig) KTable(org.apache.kafka.streams.kstream.KTable) JsonSubTypes(com.fasterxml.jackson.annotation.JsonSubTypes) Properties(java.util.Properties) Produced(org.apache.kafka.streams.kstream.Produced) SerializationException(org.apache.kafka.common.errors.SerializationException) Consumed(org.apache.kafka.streams.kstream.Consumed) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) KeyValue(org.apache.kafka.streams.KeyValue) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) IOException(java.io.IOException) KStream(org.apache.kafka.streams.kstream.KStream) Grouped(org.apache.kafka.streams.kstream.Grouped) CountDownLatch(java.util.concurrent.CountDownLatch) JsonTypeInfo(com.fasterxml.jackson.annotation.JsonTypeInfo) Serde(org.apache.kafka.common.serialization.Serde) Serializer(org.apache.kafka.common.serialization.Serializer) TimeWindows(org.apache.kafka.streams.kstream.TimeWindows) Duration(java.time.Duration) Map(java.util.Map) Serdes(org.apache.kafka.common.serialization.Serdes) Deserializer(org.apache.kafka.common.serialization.Deserializer) KafkaStreams(org.apache.kafka.streams.KafkaStreams)
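
JSONSerde here is a custom serde nested in the demo class; judging from the imports above it is built on Jackson's ObjectMapper and uses @JsonTypeInfo/@JsonSubTypes so a single serde can handle all of the demo's POJO types. A simplified sketch of such a Jackson-backed serde is shown below (SimpleJsonSerde is a hypothetical name, and this is not the demo's exact implementation):

import java.io.IOException;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serializer;

// Simplified sketch of a Jackson-based serde. Unlike the demo's JSONSerde, it is bound to a
// single target class instead of relying on @JsonTypeInfo-based polymorphic deserialization.
public class SimpleJsonSerde<T> implements Serializer<T>, Deserializer<T>, Serde<T> {

    private final ObjectMapper mapper = new ObjectMapper();
    private final Class<T> type;

    public SimpleJsonSerde(final Class<T> type) {
        this.type = type;
    }

    // Serializer, Deserializer and Serde all declare default configure()/close() methods,
    // so a class implementing all three must override them explicitly.
    @Override
    public void configure(final Map<String, ?> configs, final boolean isKey) { }

    @Override
    public void close() { }

    @Override
    public byte[] serialize(final String topic, final T data) {
        if (data == null) {
            return null;
        }
        try {
            return mapper.writeValueAsBytes(data);
        } catch (final IOException e) {
            throw new SerializationException("Error serializing JSON message", e);
        }
    }

    @Override
    public T deserialize(final String topic, final byte[] data) {
        if (data == null) {
            return null;
        }
        try {
            return mapper.readValue(data, type);
        } catch (final IOException e) {
            throw new SerializationException("Error deserializing JSON message", e);
        }
    }

    @Override
    public Serializer<T> serializer() {
        return this;
    }

    @Override
    public Deserializer<T> deserializer() {
        return this;
    }
}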

Example 89 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class EosIntegrationTest, method shouldNotViolateEosIfOneTaskGetsFencedUsingIsolatedAppInstances.

@Test
public void shouldNotViolateEosIfOneTaskGetsFencedUsingIsolatedAppInstances() throws Exception {
    if (eosConfig.equals(StreamsConfig.AT_LEAST_ONCE))
        return;
    try (final KafkaStreams streams1 = getKafkaStreams("streams1", false, "appDir1", 1, eosConfig, MAX_POLL_INTERVAL_MS);
        final KafkaStreams streams2 = getKafkaStreams("streams2", false, "appDir2", 1, eosConfig, MAX_POLL_INTERVAL_MS)) {
        startKafkaStreamsAndWaitForRunningState(streams1, MAX_WAIT_TIME_MS);
        startKafkaStreamsAndWaitForRunningState(streams2, MAX_WAIT_TIME_MS);
        final List<KeyValue<Long, Long>> committedDataBeforeStall = prepareData(0L, 10L, 0L, 1L);
        final List<KeyValue<Long, Long>> uncommittedDataBeforeStall = prepareData(10L, 15L, 0L, 1L);
        final List<KeyValue<Long, Long>> dataBeforeStall = new ArrayList<>(committedDataBeforeStall.size() + uncommittedDataBeforeStall.size());
        dataBeforeStall.addAll(committedDataBeforeStall);
        dataBeforeStall.addAll(uncommittedDataBeforeStall);
        final List<KeyValue<Long, Long>> dataToTriggerFirstRebalance = prepareData(15L, 20L, 0L, 1L);
        final List<KeyValue<Long, Long>> dataAfterSecondRebalance = prepareData(20L, 30L, 0L, 1L);
        writeInputData(committedDataBeforeStall);
        waitForCondition(() -> commitRequested.get() == 2, MAX_WAIT_TIME_MS, "StreamsTasks did not request commit.");
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // p-0: ---> 10 rec + C
        // p-1: ---> 10 rec + C
        final List<KeyValue<Long, Long>> committedRecords = readResult(committedDataBeforeStall.size(), CONSUMER_GROUP_ID);
        checkResultPerKey(committedRecords, committedDataBeforeStall, "The committed records before stall do not match what expected");
        writeInputData(uncommittedDataBeforeStall);
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // p-0: ---> 10 rec + C  + 5 rec (pending)
        // p-1: ---> 10 rec + C  + 5 rec (pending)
        final List<KeyValue<Long, Long>> uncommittedRecords = readResult(dataBeforeStall.size(), null);
        checkResultPerKey(uncommittedRecords, dataBeforeStall, "The uncommitted records before stall do not match what expected");
        LOG.info("Injecting Stall");
        stallInjected.set(true);
        writeInputData(dataToTriggerFirstRebalance);
        LOG.info("Input Data Written");
        waitForCondition(() -> stallingHost.get() != null, MAX_WAIT_TIME_MS, "Expected a host to start stalling");
        final String observedStallingHost = stallingHost.get();
        final KafkaStreams stallingInstance;
        final KafkaStreams remainingInstance;
        if ("streams1".equals(observedStallingHost)) {
            stallingInstance = streams1;
            remainingInstance = streams2;
        } else if ("streams2".equals(observedStallingHost)) {
            stallingInstance = streams2;
            remainingInstance = streams1;
        } else {
            throw new IllegalArgumentException("unexpected host name: " + observedStallingHost);
        }
        // the stalling instance won't have an updated view, and it doesn't matter what it thinks
        // the assignment is. We only really care that the remaining instance only sees one host
        // that owns both partitions.
        waitForCondition(
            () -> stallingInstance.metadataForAllStreamsClients().size() == 2
                && remainingInstance.metadataForAllStreamsClients().size() == 1
                && remainingInstance.metadataForAllStreamsClients().iterator().next().topicPartitions().size() == 2,
            MAX_WAIT_TIME_MS,
            () -> "Should have rebalanced.\n"
                + "Streams1[" + streams1.metadataForAllStreamsClients() + "]\n"
                + "Streams2[" + streams2.metadataForAllStreamsClients() + "]");
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // p-0: ---> 10 rec + C  + 5 rec + C    + 5 rec + C
        // p-1: ---> 10 rec + C  + 5 rec + C    + 5 rec + C
        final List<KeyValue<Long, Long>> committedRecordsAfterRebalance = readResult(uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size(), CONSUMER_GROUP_ID);
        final List<KeyValue<Long, Long>> expectedCommittedRecordsAfterRebalance = new ArrayList<>(uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size());
        expectedCommittedRecordsAfterRebalance.addAll(uncommittedDataBeforeStall);
        expectedCommittedRecordsAfterRebalance.addAll(dataToTriggerFirstRebalance);
        checkResultPerKey(committedRecordsAfterRebalance, expectedCommittedRecordsAfterRebalance, "The all committed records after rebalance do not match what expected");
        LOG.info("Releasing Stall");
        doStall = false;
        // Once the stalling host rejoins the group, we expect both instances to see both instances.
        // It doesn't really matter what the assignment is, but we might as well also assert that they
        // both see both partitions assigned exactly once
        waitForCondition(
            () -> streams1.metadataForAllStreamsClients().size() == 2
                && streams2.metadataForAllStreamsClients().size() == 2
                && streams1.metadataForAllStreamsClients().stream().mapToLong(meta -> meta.topicPartitions().size()).sum() == 2
                && streams2.metadataForAllStreamsClients().stream().mapToLong(meta -> meta.topicPartitions().size()).sum() == 2,
            MAX_WAIT_TIME_MS,
            () -> "Should have rebalanced.\n"
                + "Streams1[" + streams1.metadataForAllStreamsClients() + "]\n"
                + "Streams2[" + streams2.metadataForAllStreamsClients() + "]");
        writeInputData(dataAfterSecondRebalance);
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // p-0: ---> 10 rec + C  + 5 rec + C    + 5 rec + C   + 10 rec + C
        // p-1: ---> 10 rec + C  + 5 rec + C    + 5 rec + C   + 10 rec + C
        final List<KeyValue<Long, Long>> allCommittedRecords = readResult(committedDataBeforeStall.size() + uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size() + dataAfterSecondRebalance.size(), CONSUMER_GROUP_ID + "_ALL");
        final int allCommittedRecordsAfterRecoverySize = committedDataBeforeStall.size() + uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size() + dataAfterSecondRebalance.size();
        final List<KeyValue<Long, Long>> allExpectedCommittedRecordsAfterRecovery = new ArrayList<>(allCommittedRecordsAfterRecoverySize);
        allExpectedCommittedRecordsAfterRecovery.addAll(committedDataBeforeStall);
        allExpectedCommittedRecordsAfterRecovery.addAll(uncommittedDataBeforeStall);
        allExpectedCommittedRecordsAfterRecovery.addAll(dataToTriggerFirstRebalance);
        allExpectedCommittedRecordsAfterRecovery.addAll(dataAfterSecondRebalance);
        checkResultPerKey(allCommittedRecords, allExpectedCommittedRecordsAfterRecovery, "The all committed records after recovery do not match what expected");
    }
}
Also used : CoreMatchers.is(org.hamcrest.CoreMatchers.is) Arrays(java.util.Arrays) Stores(org.apache.kafka.streams.state.Stores) LoggerFactory(org.slf4j.LoggerFactory) RocksDbKeyValueBytesStoreSupplier(org.apache.kafka.streams.state.internals.RocksDbKeyValueBytesStoreSupplier) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Locale(java.util.Locale) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Duration(java.time.Duration) Map(java.util.Map) Serdes(org.apache.kafka.common.serialization.Serdes) BigInteger(java.math.BigInteger) Parameterized(org.junit.runners.Parameterized) Consumer(org.apache.kafka.clients.consumer.Consumer) TopicPartition(org.apache.kafka.common.TopicPartition) AfterClass(org.junit.AfterClass) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) StreamsTestUtils.startKafkaStreamsAndWaitForRunningState(org.apache.kafka.test.StreamsTestUtils.startKafkaStreamsAndWaitForRunningState) KeyValue(org.apache.kafka.streams.KeyValue) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) Category(org.junit.experimental.categories.Category) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) List(java.util.List) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) MockKeyValueStore(org.apache.kafka.test.MockKeyValueStore) StreamsConfig(org.apache.kafka.streams.StreamsConfig) TaskId(org.apache.kafka.streams.processor.TaskId) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) TestUtils.consumerConfig(org.apache.kafka.test.TestUtils.consumerConfig) RunWith(org.junit.runner.RunWith) Parameters(org.junit.runners.Parameterized.Parameters) IntegrationTest(org.apache.kafka.test.IntegrationTest) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) Admin(org.apache.kafka.clients.admin.Admin) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) Before(org.junit.Before) Utils(org.apache.kafka.common.utils.Utils) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Logger(org.slf4j.Logger) Properties(java.util.Properties) IntegrationTestUtils.waitForEmptyConsumerGroup(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForEmptyConsumerGroup) StreamThread(org.apache.kafka.streams.processor.internals.StreamThread) Transformer(org.apache.kafka.streams.kstream.Transformer) 
Parameter(org.junit.runners.Parameterized.Parameter) TestUtils.waitForCondition(org.apache.kafka.test.TestUtils.waitForCondition) Assert.assertNotNull(org.junit.Assert.assertNotNull) AdminClientConfig(org.apache.kafka.clients.admin.AdminClientConfig) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) StoreBuilder(org.apache.kafka.streams.state.StoreBuilder) MockInternalProcessorContext(org.apache.kafka.test.MockInternalProcessorContext) File(java.io.File) IsolationLevel(org.apache.kafka.common.IsolationLevel) KeyValueIterator(org.apache.kafka.streams.state.KeyValueIterator) KafkaStreams(org.apache.kafka.streams.KafkaStreams) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint) RocksDBStore(org.apache.kafka.streams.state.internals.RocksDBStore) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals)
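
prepareData(...), writeInputData(...), readResult(...) and checkResultPerKey(...) are private helpers of EosIntegrationTest and are not part of this excerpt. Judging only from the call sites above (a value range followed by a list of keys), a plausible sketch of prepareData is the following; the real helper may differ:

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.streams.KeyValue;

final class EosTestDataSketch {

    // Hypothetical reconstruction: prepareData(0L, 10L, 0L, 1L) would yield the pairs
    // (0,0)...(0,9) followed by (1,0)...(1,9), i.e. one run of values per key.
    static List<KeyValue<Long, Long>> prepareData(final long fromInclusive, final long toExclusive, final Long... keys) {
        final List<KeyValue<Long, Long>> data = new ArrayList<>();
        for (final Long key : keys) {
            for (long value = fromInclusive; value < toExclusive; value++) {
                data.add(new KeyValue<>(key, value));
            }
        }
        return data;
    }
}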

Example 90 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class EosIntegrationTest, method shouldNotViolateEosIfOneTaskFails.

@Test
public void shouldNotViolateEosIfOneTaskFails() throws Exception {
    if (eosConfig.equals(StreamsConfig.AT_LEAST_ONCE))
        return;
    try (final KafkaStreams streams = getKafkaStreams("dummy", false, "appDir", 2, eosConfig, MAX_POLL_INTERVAL_MS)) {
        startKafkaStreamsAndWaitForRunningState(streams, MAX_WAIT_TIME_MS);
        final List<KeyValue<Long, Long>> committedDataBeforeFailure = prepareData(0L, 10L, 0L, 1L);
        final List<KeyValue<Long, Long>> uncommittedDataBeforeFailure = prepareData(10L, 15L, 0L, 1L);
        final List<KeyValue<Long, Long>> dataBeforeFailure = new ArrayList<>(committedDataBeforeFailure.size() + uncommittedDataBeforeFailure.size());
        dataBeforeFailure.addAll(committedDataBeforeFailure);
        dataBeforeFailure.addAll(uncommittedDataBeforeFailure);
        final List<KeyValue<Long, Long>> dataAfterFailure = prepareData(15L, 20L, 0L, 1L);
        writeInputData(committedDataBeforeFailure);
        waitForCondition(() -> commitRequested.get() == 2, MAX_WAIT_TIME_MS, "StreamsTasks did not request commit.");
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // p-0: ---> 10 rec + C
        // p-1: ---> 10 rec + C
        final List<KeyValue<Long, Long>> committedRecords = readResult(committedDataBeforeFailure.size(), CONSUMER_GROUP_ID);
        checkResultPerKey(committedRecords, committedDataBeforeFailure, "The committed records before failure do not match what expected");
        writeInputData(uncommittedDataBeforeFailure);
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // p-0: ---> 10 rec + C  + 5 rec (pending)
        // p-1: ---> 10 rec + C  + 5 rec (pending)
        final List<KeyValue<Long, Long>> uncommittedRecords = readResult(dataBeforeFailure.size(), null);
        checkResultPerKey(uncommittedRecords, dataBeforeFailure, "The uncommitted records before failure do not match what expected");
        errorInjected.set(true);
        writeInputData(dataAfterFailure);
        waitForCondition(() -> uncaughtException != null, MAX_WAIT_TIME_MS, "Should receive uncaught exception from one StreamThread.");
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // p-0: ---> 10 rec + C  + 5 rec + C    + 5 rec + C
        // p-1: ---> 10 rec + C  + 5 rec + C    + 5 rec + C
        final List<KeyValue<Long, Long>> allCommittedRecords = readResult(committedDataBeforeFailure.size() + uncommittedDataBeforeFailure.size() + dataAfterFailure.size(), CONSUMER_GROUP_ID + "_ALL");
        final List<KeyValue<Long, Long>> committedRecordsAfterFailure = readResult(uncommittedDataBeforeFailure.size() + dataAfterFailure.size(), CONSUMER_GROUP_ID);
        final int allCommittedRecordsAfterRecoverySize = committedDataBeforeFailure.size() + uncommittedDataBeforeFailure.size() + dataAfterFailure.size();
        final List<KeyValue<Long, Long>> allExpectedCommittedRecordsAfterRecovery = new ArrayList<>(allCommittedRecordsAfterRecoverySize);
        allExpectedCommittedRecordsAfterRecovery.addAll(committedDataBeforeFailure);
        allExpectedCommittedRecordsAfterRecovery.addAll(uncommittedDataBeforeFailure);
        allExpectedCommittedRecordsAfterRecovery.addAll(dataAfterFailure);
        final int committedRecordsAfterRecoverySize = uncommittedDataBeforeFailure.size() + dataAfterFailure.size();
        final List<KeyValue<Long, Long>> expectedCommittedRecordsAfterRecovery = new ArrayList<>(committedRecordsAfterRecoverySize);
        expectedCommittedRecordsAfterRecovery.addAll(uncommittedDataBeforeFailure);
        expectedCommittedRecordsAfterRecovery.addAll(dataAfterFailure);
        checkResultPerKey(allCommittedRecords, allExpectedCommittedRecordsAfterRecovery, "The all committed records after recovery do not match what expected");
        checkResultPerKey(committedRecordsAfterFailure, expectedCommittedRecordsAfterRecovery, "The committed records after recovery do not match what expected");
        assertThat("Should only get one uncaught exception from Streams.", hasUnexpectedError, is(false));
    }
}
Also used : KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValue(org.apache.kafka.streams.KeyValue) ArrayList(java.util.ArrayList) OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
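
checkResultPerKey(...) is likewise a private helper that is not shown here. Since exactly-once only guarantees ordering per key, a plausible sketch compares the observed and expected records key by key (assumed names; not the project's actual helper):

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.kafka.streams.KeyValue;

final class EosResultCheckSketch {

    // For each key in the expected data, the sequence of records observed for that key
    // must match the expected sequence exactly (KeyValue implements equals/hashCode).
    static void checkResultPerKey(final List<KeyValue<Long, Long>> result,
                                  final List<KeyValue<Long, Long>> expected,
                                  final String reason) {
        final Set<Long> keys = new LinkedHashSet<>();
        for (final KeyValue<Long, Long> record : expected) {
            keys.add(record.key);
        }
        for (final Long key : keys) {
            assertThat(reason, recordsForKey(result, key), equalTo(recordsForKey(expected, key)));
        }
    }

    private static List<KeyValue<Long, Long>> recordsForKey(final List<KeyValue<Long, Long>> records, final Long key) {
        final List<KeyValue<Long, Long>> filtered = new ArrayList<>();
        for (final KeyValue<Long, Long> record : records) {
            if (record.key.equals(key)) {
                filtered.add(record);
            }
        }
        return filtered;
    }
}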

Aggregations

KeyValue (org.apache.kafka.streams.KeyValue): 343
Test (org.junit.Test): 268
Properties (java.util.Properties): 127
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 123
Windowed (org.apache.kafka.streams.kstream.Windowed): 105
ArrayList (java.util.ArrayList): 90
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 82
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 74
Bytes (org.apache.kafka.common.utils.Bytes): 74
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 68
IntegrationTest (org.apache.kafka.test.IntegrationTest): 66
Serdes (org.apache.kafka.common.serialization.Serdes): 65
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 62
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 55
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 53
KStream (org.apache.kafka.streams.kstream.KStream): 52
SessionWindow (org.apache.kafka.streams.kstream.internals.SessionWindow): 46
KTable (org.apache.kafka.streams.kstream.KTable): 43
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 42
Consumed (org.apache.kafka.streams.kstream.Consumed): 41
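
The counts above suggest the most common combination: KeyValue used together with JUnit tests, a StreamsBuilder topology, and TopologyTestDriver. Below is a minimal sketch of that pattern, assuming the kafka-streams-test-utils artifact is on the classpath; the topic names, application id, and upper-casing step are illustrative only and are not taken from any of the examples above.

import java.util.List;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class KeyValueTopologySketch {

    public static void main(final String[] args) {
        // A tiny topology: re-emit each record with its value upper-cased; the mapper's
        // return type is the KeyValue pair this page is about.
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
            .map((key, value) -> KeyValue.pair(key, value.toUpperCase()))
            .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));

        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "keyvalue-sketch");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // never contacted by the test driver

        // TopologyTestDriver runs the topology in-process, without a broker.
        final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props);
        driver.createInputTopic("input-topic", new StringSerializer(), new StringSerializer())
            .pipeInput("region", "emea");
        final List<KeyValue<String, String>> output =
            driver.createOutputTopic("output-topic", new StringDeserializer(), new StringDeserializer())
                .readKeyValuesToList();
        System.out.println(output); // expect a single pair: KeyValue(region, EMEA)
        driver.close();
    }
}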