Example 6 with StreamsException

use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

the class InternalTopicManager method makeReady.

/**
     * Prepares the given set of internal topics.
     *
     * If a topic does not exist, it is created.
     * If a topic already exists with the correct number of partitions, it is left as is.
     * If a topic already exists but with a different number of partitions, we fail with an exception that asks the user to reset the application before restarting it.
     */
public void makeReady(final Map<InternalTopicConfig, Integer> topics) {
    for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) {
        try {
            final MetadataResponse metadata = streamsKafkaClient.fetchMetadata();
            final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata);
            final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions);
            streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata);
            return;
        } catch (StreamsException ex) {
            log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i);
        }
    }
    throw new StreamsException("Could not create internal topics.");
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse)
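
The loop above is a bounded-retry pattern: attempt the operation, log each StreamsException, and surface a terminal StreamsException once the retry budget is exhausted. A minimal, self-contained sketch of the same pattern follows; the Action interface, MAX_TRIES constant, and retry helper are hypothetical names introduced here for illustration and are not part of the Kafka codebase.

import org.apache.kafka.streams.errors.StreamsException;

public final class RetrySketch {

    /** Hypothetical functional interface for a retriable unit of work. */
    interface Action {
        void run();
    }

    private static final int MAX_TRIES = 5;

    /** Retries the action on StreamsException, rethrowing once the budget is spent. */
    static void retry(final Action action) {
        StreamsException last = null;
        for (int i = 0; i < MAX_TRIES; i++) {
            try {
                action.run();
                return;
            } catch (final StreamsException ex) {
                last = ex;
                System.err.println("Attempt " + i + " failed: " + ex.getMessage());
            }
        }
        throw new StreamsException("Operation failed after " + MAX_TRIES + " attempts.", last);
    }
}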

Example 7 with StreamsException

use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

the class RocksDBStore method openDB.

@SuppressWarnings("unchecked")
public void openDB(ProcessorContext context) {
    // initialize the default rocksdb options
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    options = new Options();
    options.setTableFormatConfig(tableConfig);
    options.setWriteBufferSize(WRITE_BUFFER_SIZE);
    options.setCompressionType(COMPRESSION_TYPE);
    options.setCompactionStyle(COMPACTION_STYLE);
    options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    options.setCreateIfMissing(true);
    options.setErrorIfExists(false);
    options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDb
    // note that the current implementation increases the number of compaction threads
    // but not flush threads.
    options.setIncreaseParallelism(Runtime.getRuntime().availableProcessors());
    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);
    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);
    final Map<String, Object> configs = context.appConfigs();
    final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, options, configs);
    }
    // we need to construct the serde while opening DB since
    // it is also triggered by windowed DB segments without initialization
    this.serdes = new StateSerdes<>(name, keySerde == null ? (Serde<K>) context.keySerde() : keySerde, valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);
    this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
    try {
        this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
    } catch (IOException e) {
        throw new StreamsException(e);
    }
}
Also used : FlushOptions(org.rocksdb.FlushOptions) WriteOptions(org.rocksdb.WriteOptions) Options(org.rocksdb.Options) StreamsException(org.apache.kafka.streams.errors.StreamsException) BlockBasedTableConfig(org.rocksdb.BlockBasedTableConfig) IOException(java.io.IOException) RocksDBConfigSetter(org.apache.kafka.streams.state.RocksDBConfigSetter) File(java.io.File)
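
The ROCKSDB_CONFIG_SETTER_CLASS_CONFIG lookup above is the public hook for overriding these Options. A short sketch of a custom setter; the class name and chosen values are illustrative, but RocksDBConfigSetter and its setConfig signature are the actual Kafka Streams API.

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.Options;

public class CustomRocksDBConfig implements RocksDBConfigSetter {
    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        // Runs after the defaults set in openDB, so anything set here wins.
        // The values below are arbitrary examples, not recommendations.
        options.setWriteBufferSize(16 * 1024 * 1024L);
        options.setMaxWriteBufferNumber(4);
    }
}

It is registered through the streams configuration, e.g. props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksDBConfig.class);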

Example 8 with StreamsException

use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

the class TopologyBuilderTest method shouldThroughOnUnassignedStateStoreAccess.

@Test(expected = TopologyBuilderException.class)
public void shouldThroughOnUnassignedStateStoreAccess() {
    final String sourceNodeName = "source";
    final String goodNodeName = "goodGuy";
    final String badNodeName = "badGuy";
    final Properties config = new Properties();
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "host:1");
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
    final StreamsConfig streamsConfig = new StreamsConfig(config);
    try {
        final TopologyBuilder builder = new TopologyBuilder();
        builder.addSource(sourceNodeName, "topic").addProcessor(goodNodeName, new LocalMockProcessorSupplier(), sourceNodeName).addStateStore(Stores.create(LocalMockProcessorSupplier.STORE_NAME).withStringKeys().withStringValues().inMemory().build(), goodNodeName).addProcessor(badNodeName, new LocalMockProcessorSupplier(), sourceNodeName);
        final ProcessorTopologyTestDriver driver = new ProcessorTopologyTestDriver(streamsConfig, builder);
        driver.process("topic", null, null);
    } catch (final StreamsException e) {
        final Throwable cause = e.getCause();
        if (cause instanceof TopologyBuilderException && cause.getMessage().equals("Invalid topology building: Processor " + badNodeName + " has no access to StateStore " + LocalMockProcessorSupplier.STORE_NAME)) {
            throw (TopologyBuilderException) cause;
        } else {
            throw new RuntimeException("Expected a different exception. Caught:", e);
        }
    }
}
Also used : TopologyBuilderException(org.apache.kafka.streams.errors.TopologyBuilderException) ProcessorTopologyTestDriver(org.apache.kafka.test.ProcessorTopologyTestDriver) StreamsException(org.apache.kafka.streams.errors.StreamsException) Properties(java.util.Properties) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
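
The test drives the failure path: badNodeName is added without declaring the store, so accessing it throws. In application code the fix is to connect the processor to the store when building the topology; a minimal sketch against the same TopologyBuilder API, reusing the test's node and store names:

builder.addSource(sourceNodeName, "topic")
       .addProcessor(badNodeName, new LocalMockProcessorSupplier(), sourceNodeName)
       // grant the processor access to a store added elsewhere in the topology
       .connectProcessorAndStateStores(badNodeName, LocalMockProcessorSupplier.STORE_NAME);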

Example 9 with StreamsException

use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

the class StreamTaskTest method shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContext.

@SuppressWarnings("unchecked")
@Test
public void shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContext() throws Exception {
    final MockSourceNode processorNode = new MockSourceNode(topic1, intDeserializer, intDeserializer) {

        @Override
        public void process(final Object key, final Object value) {
            throw new KafkaException("KABOOM!");
        }
    };
    final List<ProcessorNode> processorNodes = Collections.<ProcessorNode>singletonList(processorNode);
    final Map<String, SourceNode> sourceNodes = Collections.<String, SourceNode>singletonMap(topic1[0], processorNode);
    final ProcessorTopology topology = new ProcessorTopology(processorNodes, sourceNodes, Collections.<String, SinkNode>emptyMap(), Collections.<StateStore>emptyList(), Collections.<String, String>emptyMap(), Collections.<StateStore>emptyList());
    task.close();
    task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, testCache, time, recordCollector);
    final int offset = 20;
    task.addRecords(partition1, Collections.singletonList(new ConsumerRecord<>(partition1.topic(), partition1.partition(), offset, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
    try {
        task.process();
        fail("Should've thrown StreamsException");
    } catch (StreamsException e) {
        final String message = e.getMessage();
        assertTrue("message=" + message + " should contain topic", message.contains("topic=" + topic1[0]));
        assertTrue("message=" + message + " should contain partition", message.contains("partition=" + partition1.partition()));
        assertTrue("message=" + message + " should contain offset", message.contains("offset=" + offset));
        assertTrue("message=" + message + " should contain processor", message.contains("processor=" + processorNode.name()));
    }
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) MockSourceNode(org.apache.kafka.test.MockSourceNode) MockProcessorNode(org.apache.kafka.test.MockProcessorNode) KafkaException(org.apache.kafka.common.KafkaException) Test(org.junit.Test)
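
Because the KafkaException surfaces wrapped in a StreamsException on the stream thread, application code usually observes it through the uncaught exception handler rather than a try/catch. A hedged sketch, assuming an already-constructed KafkaStreams instance named streams:

streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
    @Override
    public void uncaughtException(final Thread thread, final Throwable throwable) {
        // The StreamsException message carries the topic/partition/offset/processor
        // context asserted in the test above.
        System.err.println("Stream thread " + thread.getName() + " died: " + throwable.getMessage());
    }
});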

Example 10 with StreamsException

use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

the class SinkNodeTest method shouldThrowStreamsExceptionOnKeyValueTypeSerializerMismatch.

@Test
@SuppressWarnings("unchecked")
public void shouldThrowStreamsExceptionOnKeyValueTypeSerializerMismatch() {
    // Given
    final Serializer anySerializer = Serdes.Bytes().serializer();
    final StateSerdes anyStateSerde = StateSerdes.withBuiltinTypes("anyName", Bytes.class, Bytes.class);
    final MockProcessorContext context = new MockProcessorContext(anyStateSerde, new RecordCollectorImpl(new MockProducer<byte[], byte[]>(true, anySerializer, anySerializer), null));
    context.setTime(0);
    final SinkNode sink = new SinkNode<>("anyNodeName", "any-output-topic", anySerializer, anySerializer, null);
    sink.init(context);
    final String keyOfDifferentTypeThanSerializer = "key with different type";
    final String valueOfDifferentTypeThanSerializer = "value with different type";
    // When/Then
    try {
        sink.process(keyOfDifferentTypeThanSerializer, valueOfDifferentTypeThanSerializer);
        fail("Should have thrown StreamsException");
    } catch (final StreamsException e) {
        assertThat(e.getCause(), instanceOf(ClassCastException.class));
    }
}
Also used : MockProducer(org.apache.kafka.clients.producer.MockProducer) StreamsException(org.apache.kafka.streams.errors.StreamsException) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) StateSerdes(org.apache.kafka.streams.state.StateSerdes) MockProcessorContext(org.apache.kafka.test.MockProcessorContext) Serializer(org.apache.kafka.common.serialization.Serializer) Test(org.junit.Test)
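
The underlying cause is a generic-type mismatch: a Bytes serializer handed String records fails with a ClassCastException at serialize time, which Streams wraps as asserted above. Choosing a serde that matches the record type avoids the wrap entirely; a one-line sketch using public Serdes only (the topic name is illustrative):

import org.apache.kafka.common.serialization.Serdes;

// A String serializer accepts the String records the test sends, so there is no
// ClassCastException and therefore no StreamsException wrap.
final byte[] encoded = Serdes.String().serializer().serialize("any-output-topic", "key with different type");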

Aggregations

StreamsException (org.apache.kafka.streams.errors.StreamsException): 186
Test (org.junit.Test): 90
KafkaException (org.apache.kafka.common.KafkaException): 41
TopicPartition (org.apache.kafka.common.TopicPartition): 38
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 36
HashMap (java.util.HashMap): 27
Map (java.util.Map): 25
HashSet (java.util.HashSet): 18
Properties (java.util.Properties): 17
TaskId (org.apache.kafka.streams.processor.TaskId): 14
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 13
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 12
ArrayList (java.util.ArrayList): 11
ExecutionException (java.util.concurrent.ExecutionException): 11
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException): 11
IOException (java.io.IOException): 10
Set (java.util.Set): 10
LogContext (org.apache.kafka.common.utils.LogContext): 10
MockTime (org.apache.kafka.common.utils.MockTime): 10
StateStore (org.apache.kafka.streams.processor.StateStore): 10