use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class InternalTopicManager method makeReady.
/**
 * Prepares a set of given internal topics.
 *
 * If a topic does not exist, a new topic is created.
 * If a topic already exists with the correct number of partitions, it is left untouched.
 * If a topic already exists with a different number of partitions, we fail and throw an
 * exception asking the user to reset the application before restarting it.
 */
public void makeReady(final Map<InternalTopicConfig, Integer> topics) {
    for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) {
        try {
            final MetadataResponse metadata = streamsKafkaClient.fetchMetadata();
            final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata);
            final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions);
            streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata);
            return;
        } catch (StreamsException ex) {
            log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i);
        }
    }
    throw new StreamsException("Could not create internal topics.");
}
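The loop above shows the typical StreamsException retry idiom: log and swallow the exception while attempts remain, then rethrow once the retry budget is spent. Below is a minimal, generic sketch of that idiom; the MAX_ATTEMPTS constant and the Callable task are illustrative and not part of the Kafka code above.

import java.util.concurrent.Callable;
import org.apache.kafka.streams.errors.StreamsException;

public final class RetryingExecutor {

    // illustrative bound, playing the role of MAX_TOPIC_READY_TRY above
    private static final int MAX_ATTEMPTS = 5;

    // Runs the task, retrying on failure; once all attempts are exhausted,
    // surfaces a fresh StreamsException to the caller, as makeReady() does.
    public static <T> T callWithRetries(final Callable<T> task) {
        for (int attempt = 0; attempt < MAX_ATTEMPTS; attempt++) {
            try {
                return task.call();
            } catch (final Exception ex) {
                System.err.println("Could not complete operation: " + ex.getMessage() + " Retry #" + attempt);
            }
        }
        throw new StreamsException("Operation failed after " + MAX_ATTEMPTS + " attempts.");
    }
}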
use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class RocksDBStore method openDB.
@SuppressWarnings("unchecked")
public void openDB(ProcessorContext context) {
// initialize the default rocksdb options
final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
tableConfig.setBlockSize(BLOCK_SIZE);
options = new Options();
options.setTableFormatConfig(tableConfig);
options.setWriteBufferSize(WRITE_BUFFER_SIZE);
options.setCompressionType(COMPRESSION_TYPE);
options.setCompactionStyle(COMPACTION_STYLE);
options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
options.setCreateIfMissing(true);
options.setErrorIfExists(false);
options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
// this is the recommended way to increase parallelism in RocksDb
// note that the current implementation increases the number of compaction threads
// but not flush threads.
options.setIncreaseParallelism(Runtime.getRuntime().availableProcessors());
wOptions = new WriteOptions();
wOptions.setDisableWAL(true);
fOptions = new FlushOptions();
fOptions.setWaitForFlush(true);
final Map<String, Object> configs = context.appConfigs();
final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
if (configSetterClass != null) {
final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
configSetter.setConfig(name, options, configs);
}
// we need to construct the serde while opening DB since
// it is also triggered by windowed DB segments without initialization
this.serdes = new StateSerdes<>(name, keySerde == null ? (Serde<K>) context.keySerde() : keySerde, valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);
this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
try {
this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
} catch (IOException e) {
throw new StreamsException(e);
}
}
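The configSetterClass block above is the hook for user-supplied RocksDB tuning: when rocksdb.config.setter is configured, the store instantiates the class and lets it adjust the Options assembled earlier. A minimal sketch of such a setter follows; the class name and the buffer size are illustrative, not taken from the Kafka code above.

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.Options;

// register via props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksDBConfigSetter.class)
public class CustomRocksDBConfigSetter implements RocksDBConfigSetter {

    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        // overrides a default chosen in openDB(); the value here is purely illustrative
        options.setWriteBufferSize(32 * 1024 * 1024L);
    }
}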
use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class TopologyBuilderTest method shouldThroughOnUnassignedStateStoreAccess.
@Test(expected = TopologyBuilderException.class)
public void shouldThroughOnUnassignedStateStoreAccess() {
    final String sourceNodeName = "source";
    final String goodNodeName = "goodGuy";
    final String badNodeName = "badGuy";
    final Properties config = new Properties();
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "host:1");
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
    final StreamsConfig streamsConfig = new StreamsConfig(config);
    try {
        final TopologyBuilder builder = new TopologyBuilder();
        builder.addSource(sourceNodeName, "topic")
            .addProcessor(goodNodeName, new LocalMockProcessorSupplier(), sourceNodeName)
            .addStateStore(Stores.create(LocalMockProcessorSupplier.STORE_NAME).withStringKeys().withStringValues().inMemory().build(), goodNodeName)
            .addProcessor(badNodeName, new LocalMockProcessorSupplier(), sourceNodeName);
        final ProcessorTopologyTestDriver driver = new ProcessorTopologyTestDriver(streamsConfig, builder);
        driver.process("topic", null, null);
    } catch (final StreamsException e) {
        final Throwable cause = e.getCause();
        if (cause instanceof TopologyBuilderException
            && cause.getMessage().equals("Invalid topology building: Processor " + badNodeName
                + " has no access to StateStore " + LocalMockProcessorSupplier.STORE_NAME)) {
            throw (TopologyBuilderException) cause;
        } else {
            throw new RuntimeException("Expected a different exception but caught:", e);
        }
    }
}
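The expected TopologyBuilderException fires because badGuy is never connected to the store. For contrast, here is a hedged sketch of a wiring that grants both processors access; MyProcessorSupplier is a hypothetical stand-in for the test's private LocalMockProcessorSupplier.

final TopologyBuilder builder = new TopologyBuilder();
builder.addSource("source", "topic")
    .addProcessor("goodGuy", new MyProcessorSupplier(), "source")
    .addProcessor("badGuy", new MyProcessorSupplier(), "source")
    // naming both processors here connects them to the store and avoids the exception;
    // builder.connectProcessorAndStateStores("badGuy", "Store") achieves the same after the fact
    .addStateStore(Stores.create("Store").withStringKeys().withStringValues().inMemory().build(),
        "goodGuy", "badGuy");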
use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class StreamTaskTest method shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContext.
@SuppressWarnings("unchecked")
@Test
public void shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContext() throws Exception {
final MockSourceNode processorNode = new MockSourceNode(topic1, intDeserializer, intDeserializer) {
@Override
public void process(final Object key, final Object value) {
throw new KafkaException("KABOOM!");
}
};
final List<ProcessorNode> processorNodes = Collections.<ProcessorNode>singletonList(processorNode);
final Map<String, SourceNode> sourceNodes = Collections.<String, SourceNode>singletonMap(topic1[0], processorNode);
final ProcessorTopology topology = new ProcessorTopology(processorNodes, sourceNodes, Collections.<String, SinkNode>emptyMap(), Collections.<StateStore>emptyList(), Collections.<String, String>emptyMap(), Collections.<StateStore>emptyList());
task.close();
task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, testCache, time, recordCollector);
final int offset = 20;
task.addRecords(partition1, Collections.singletonList(new ConsumerRecord<>(partition1.topic(), partition1.partition(), offset, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
try {
task.process();
fail("Should've thrown StreamsException");
} catch (StreamsException e) {
final String message = e.getMessage();
assertTrue("message=" + message + " should contain topic", message.contains("topic=" + topic1[0]));
assertTrue("message=" + message + " should contain partition", message.contains("partition=" + partition1.partition()));
assertTrue("message=" + message + " should contain offset", message.contains("offset=" + offset));
assertTrue("message=" + message + " should contain processor", message.contains("processor=" + processorNode.name()));
}
}
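In a running application, the context that StreamTask attaches here (topic, partition, offset, processor) usually surfaces through the stream thread's uncaught exception handler rather than a catch block. A minimal sketch of wiring such a handler, assuming placeholder application id, bootstrap servers, and topic name:

import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.TopologyBuilder;

public class StreamsExceptionHandlerExample {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        // placeholder configuration values
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "host:1");
        final TopologyBuilder builder = new TopologyBuilder();
        builder.addSource("source", "topic");
        final KafkaStreams streams = new KafkaStreams(builder, props);
        // StreamsExceptions that kill a stream thread land here; the message still
        // carries the topic/partition/offset/processor context added by StreamTask
        streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
            @Override
            public void uncaughtException(final Thread thread, final Throwable throwable) {
                System.err.println("Stream thread " + thread.getName() + " died: " + throwable.getMessage());
            }
        });
        streams.start();
    }
}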
use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class SinkNodeTest method shouldThrowStreamsExceptionOnKeyValueTypeSerializerMismatch.
@Test
@SuppressWarnings("unchecked")
public void shouldThrowStreamsExceptionOnKeyValueTypeSerializerMismatch() {
    // Given
    final Serializer anySerializer = Serdes.Bytes().serializer();
    final StateSerdes anyStateSerde = StateSerdes.withBuiltinTypes("anyName", Bytes.class, Bytes.class);
    final MockProcessorContext context = new MockProcessorContext(anyStateSerde,
        new RecordCollectorImpl(new MockProducer<byte[], byte[]>(true, anySerializer, anySerializer), null));
    context.setTime(0);
    final SinkNode sink = new SinkNode<>("anyNodeName", "any-output-topic", anySerializer, anySerializer, null);
    sink.init(context);
    final String keyOfDifferentTypeThanSerializer = "key with different type";
    final String valueOfDifferentTypeThanSerializer = "value with different type";
    // When/Then
    try {
        sink.process(keyOfDifferentTypeThanSerializer, valueOfDifferentTypeThanSerializer);
        fail("Should have thrown StreamsException");
    } catch (final StreamsException e) {
        assertThat(e.getCause(), instanceOf(ClassCastException.class));
    }
}
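The wrapped ClassCastException arises because the Bytes serializer is handed String keys and values. For comparison, a sketch of a matching setup against the same internal SinkNode API, reusing the test's context variable; this is illustrative, not a complete test.

final Serializer<String> stringSerializer = Serdes.String().serializer();
final SinkNode<String, String> sink = new SinkNode<>("anyNodeName", "any-output-topic",
    stringSerializer, stringSerializer, null);
sink.init(context);
// String serializers match the String key and value, so no StreamsException is thrown
sink.process("key with matching type", "value with matching type");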