
Example 41 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project apache-kafka-on-k8s by banzaicloud.

The class RecordQueue, method addRawRecords.

/**
 * Add a batch of {@link ConsumerRecord} into the queue
 *
 * @param rawRecords the raw records
 * @return the size of this queue
 */
int addRawRecords(final Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) {
    for (final ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) {
        final ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(processorContext, rawRecord);
        if (record == null) {
            continue;
        }
        final long timestamp;
        try {
            timestamp = timestampExtractor.extract(record, timeTracker.get());
        } catch (final StreamsException internalFatalExtractorException) {
            throw internalFatalExtractorException;
        } catch (final Exception fatalUserException) {
            throw new StreamsException(String.format("Fatal user code error in TimestampExtractor callback for record %s.", record), fatalUserException);
        }
        log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record);
        // drop message if TS is invalid, i.e., negative
        if (timestamp < 0) {
            continue;
        }
        final StampedRecord stampedRecord = new StampedRecord(record, timestamp);
        fifoQueue.addLast(stampedRecord);
        timeTracker.addElement(stampedRecord);
    }
    // update the partition timestamp if the currently tracked
    // min timestamp has exceeded it; this will usually only
    // take effect for the first added batch
    final long timestamp = timeTracker.get();
    if (timestamp > partitionTime) {
        partitionTime = timestamp;
    }
    return size();
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException)
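
A StreamsException thrown by the extractor itself is rethrown unchanged above, while any other exception from the callback is wrapped as a "fatal user code error". A minimal sketch of a custom extractor that exercises both the drop-on-negative-timestamp path and the wrap-on-failure path (the class name and payload handling are hypothetical, not part of the snippet):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;

public class PayloadTimestampExtractor implements TimestampExtractor {

    @Override
    public long extract(final ConsumerRecord<Object, Object> record, final long partitionTime) {
        final Object value = record.value();
        if (value == null) {
            // A negative timestamp makes addRawRecords drop the record silently.
            return -1L;
        }
        // Any unchecked exception escaping this method (e.g. a ClassCastException from this cast)
        // is caught in addRawRecords and rethrown wrapped in a StreamsException.
        return ((Number) value).longValue();
    }
}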

Example 42 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

The class MeteredTimestampedWindowStoreTest, method shouldNotThrowExceptionIfSerdesCorrectlySetFromConstructorParameters.

@Test
public void shouldNotThrowExceptionIfSerdesCorrectlySetFromConstructorParameters() {
    EasyMock.expect(innerStoreMock.name()).andStubReturn("mocked-store");
    EasyMock.replay(innerStoreMock);
    final MeteredTimestampedWindowStore<String, Long> store = new MeteredTimestampedWindowStore<>(
        innerStoreMock,
        10L, // any size
        "scope",
        new MockTime(),
        Serdes.String(),
        new ValueAndTimestampSerde<>(Serdes.Long()));
    store.init((StateStoreContext) context, innerStoreMock);
    try {
        store.put("key", ValueAndTimestamp.make(42L, 60000), 60000L);
    } catch (final StreamsException exception) {
        if (exception.getCause() instanceof ClassCastException) {
            fail("Serdes are not correctly set from constructor parameters.");
        }
        throw exception;
    }
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) MockTime(org.apache.kafka.common.utils.MockTime) Test(org.junit.Test)
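
The last constructor argument above wraps the plain value serde so that each value is stored together with its timestamp. A minimal round-trip sketch using the same ValueAndTimestampSerde and Serdes.Long() wiring as the test (note that ValueAndTimestampSerde lives in an internal package, and the topic name here is purely illustrative):

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.ValueAndTimestamp;
import org.apache.kafka.streams.state.internals.ValueAndTimestampSerde;

public class ValueAndTimestampSerdeSketch {
    public static void main(final String[] args) {
        // Wrap the Long serde so value and timestamp are serialized and deserialized together.
        final Serde<ValueAndTimestamp<Long>> serde = new ValueAndTimestampSerde<>(Serdes.Long());
        final ValueAndTimestamp<Long> original = ValueAndTimestamp.make(42L, 60_000L);
        final byte[] bytes = serde.serializer().serialize("any-topic", original);
        final ValueAndTimestamp<Long> restored = serde.deserializer().deserialize("any-topic", bytes);
        // Both the value (42) and the timestamp (60000) survive the round trip.
        System.out.println(restored);
    }
}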

Example 43 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

The class MeteredTimestampedWindowStoreTest, method shouldNotThrowExceptionIfSerdesCorrectlySetFromProcessorContext.

@Test
public void shouldNotThrowExceptionIfSerdesCorrectlySetFromProcessorContext() {
    EasyMock.expect(innerStoreMock.name()).andStubReturn("mocked-store");
    EasyMock.replay(innerStoreMock);
    final MeteredTimestampedWindowStore<String, Long> store = new MeteredTimestampedWindowStore<>(
        innerStoreMock,
        10L, // any size
        "scope",
        new MockTime(),
        null,
        null);
    store.init((StateStoreContext) context, innerStoreMock);
    try {
        store.put("key", ValueAndTimestamp.make(42L, 60000), 60000L);
    } catch (final StreamsException exception) {
        if (exception.getCause() instanceof ClassCastException) {
            fail("Serdes are not correctly set from processor context.");
        }
        throw exception;
    }
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) MockTime(org.apache.kafka.common.utils.MockTime) Test(org.junit.Test)
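
Passing null for both serdes above makes the store fall back to the default serdes held by the processor context. A minimal configuration sketch showing where those defaults come from in an application (the application id and bootstrap servers are placeholders; the String/Long choice assumes they match the store's key and value types):

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "timestamped-store-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
// Default serdes are used whenever a store or operator is not given explicit serdes.
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass());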

Example 44 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

The class TopologyTest, method shouldThrowOnUnassignedStateStoreAccess.

@Test
public void shouldThrowOnUnassignedStateStoreAccess() {
    final String sourceNodeName = "source";
    final String goodNodeName = "goodGuy";
    final String badNodeName = "badGuy";
    mockStoreBuilder();
    EasyMock.expect(storeBuilder.build()).andReturn(new MockKeyValueStore("store", false));
    EasyMock.replay(storeBuilder);
    topology.addSource(sourceNodeName, "topic")
        .addProcessor(goodNodeName, new LocalMockProcessorSupplier(), sourceNodeName)
        .addStateStore(storeBuilder, goodNodeName)
        .addProcessor(badNodeName, new LocalMockProcessorSupplier(), sourceNodeName);
    final Properties config = new Properties();
    config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class);
    config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class);
    try {
        new TopologyTestDriver(topology, config);
        fail("Should have thrown StreamsException");
    } catch (final StreamsException e) {
        final String error = e.toString();
        final String expectedMessage = "org.apache.kafka.streams.errors.StreamsException: failed to initialize processor " + badNodeName;
        assertThat(error, equalTo(expectedMessage));
    }
}
Also used : Serdes(org.apache.kafka.common.serialization.Serdes) StreamsException(org.apache.kafka.streams.errors.StreamsException) MockKeyValueStore(org.apache.kafka.test.MockKeyValueStore) Properties(java.util.Properties) Test(org.junit.Test)
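
The test only asserts the failure; the fix it implies is to make the store visible to the second processor as well. A minimal sketch continuing from the topology built above (assuming the mocked builder's store is named "store"):

// Either connect the already-added store to the processor that needs it ...
topology.connectProcessorAndStateStores(badNodeName, "store");
// ... or name both processors when the store is added in the first place:
// topology.addStateStore(storeBuilder, goodNodeName, badNodeName);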

Example 45 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

The class StreamThreadTest, method shouldNotReturnDataAfterTaskMigrated.

@Test
public void shouldNotReturnDataAfterTaskMigrated() {
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    final InternalTopologyBuilder internalTopologyBuilder = EasyMock.createNiceMock(InternalTopologyBuilder.class);
    expect(internalTopologyBuilder.fullSourceTopicNames()).andReturn(Collections.singletonList(topic1)).times(2);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
    consumer.subscribe(Collections.singletonList(topic1), new MockRebalanceListener());
    consumer.rebalance(Collections.singletonList(t1p1));
    consumer.updateEndOffsets(Collections.singletonMap(t1p1, 10L));
    consumer.seekToEnd(Collections.singletonList(t1p1));
    final ChangelogReader changelogReader = new MockChangelogReader() {

        @Override
        public void restore(final Map<TaskId, Task> tasks) {
            consumer.addRecord(new ConsumerRecord<>(topic1, 1, 11, new byte[0], new byte[0]));
            consumer.addRecord(new ConsumerRecord<>(topic1, 1, 12, new byte[1], new byte[0]));
            throw new TaskMigratedException("Changelog restore found task migrated", new RuntimeException("restore task migrated"));
        }
    };
    taskManager.handleLostAll();
    EasyMock.replay(taskManager, internalTopologyBuilder);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final StreamThread thread = new StreamThread(
        mockTime, config, null, consumer, consumer, changelogReader, null, taskManager,
        streamsMetrics, new TopologyMetadata(internalTopologyBuilder, config), CLIENT_ID,
        new LogContext(""), new AtomicInteger(), new AtomicLong(Long.MAX_VALUE),
        new LinkedList<>(), null, HANDLER, null
    ).updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    final StreamsException thrown = assertThrows(StreamsException.class, thread::run);
    verify(taskManager);
    assertThat(thrown.getCause(), isA(IllegalStateException.class));
    // The mock consumer should throw because records were added for a partition whose assignment has been wiped out.
    assertEquals("No current assignment for partition topic1-1", thrown.getCause().getMessage());
    assertFalse(consumer.shouldRebalance());
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) LogContext(org.apache.kafka.common.utils.LogContext) LinkedList(java.util.LinkedList) MockRebalanceListener(org.apache.kafka.clients.consumer.internals.MockRebalanceListener) AtomicLong(java.util.concurrent.atomic.AtomicLong) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) Collections.emptyMap(java.util.Collections.emptyMap) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) Test(org.junit.Test)
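
In an application, a StreamsException like the one asserted above reaches the client through the stream thread's uncaught exception handler. A minimal sketch using the public StreamsUncaughtExceptionHandler API (topology and props are placeholders assumed to exist):

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse;

final KafkaStreams streams = new KafkaStreams(topology, props);
streams.setUncaughtExceptionHandler(exception -> {
    // Thread-fatal errors (for example a wrapped TaskMigratedException) land here;
    // REPLACE_THREAD starts a new stream thread instead of shutting the client down.
    return StreamThreadExceptionResponse.REPLACE_THREAD;
});
streams.start();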

Aggregations

StreamsException (org.apache.kafka.streams.errors.StreamsException): 186
Test (org.junit.Test): 90
KafkaException (org.apache.kafka.common.KafkaException): 41
TopicPartition (org.apache.kafka.common.TopicPartition): 38
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 36
HashMap (java.util.HashMap): 27
Map (java.util.Map): 25
HashSet (java.util.HashSet): 18
Properties (java.util.Properties): 17
TaskId (org.apache.kafka.streams.processor.TaskId): 14
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 13
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 12
ArrayList (java.util.ArrayList): 11
ExecutionException (java.util.concurrent.ExecutionException): 11
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException): 11
IOException (java.io.IOException): 10
Set (java.util.Set): 10
LogContext (org.apache.kafka.common.utils.LogContext): 10
MockTime (org.apache.kafka.common.utils.MockTime): 10
StateStore (org.apache.kafka.streams.processor.StateStore): 10