use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class InternalStreamsBuilder method maybeOptimizeRepartitionOperations.
private void maybeOptimizeRepartitionOperations() {
    maybeUpdateKeyChangingRepartitionNodeMap();
    final Iterator<Entry<GraphNode, LinkedHashSet<OptimizableRepartitionNode<?, ?>>>> entryIterator =
        keyChangingOperationsToOptimizableRepartitionNodes.entrySet().iterator();
    while (entryIterator.hasNext()) {
        final Map.Entry<GraphNode, LinkedHashSet<OptimizableRepartitionNode<?, ?>>> entry = entryIterator.next();
        final GraphNode keyChangingNode = entry.getKey();
        if (entry.getValue().isEmpty()) {
            continue;
        }
        final GroupedInternal<?, ?> groupedInternal = new GroupedInternal<>(getRepartitionSerdes(entry.getValue()));
        final String repartitionTopicName = getFirstRepartitionTopicName(entry.getValue());
        // passing in the name of the first repartition topic, re-used to create the optimized repartition topic
        final GraphNode optimizedSingleRepartition =
            createRepartitionNode(repartitionTopicName, groupedInternal.keySerde(), groupedInternal.valueSerde());
        // re-use parent buildPriority to make sure the single repartition graph node is evaluated before downstream nodes
        optimizedSingleRepartition.setBuildPriority(keyChangingNode.buildPriority());
        for (final OptimizableRepartitionNode<?, ?> repartitionNodeToBeReplaced : entry.getValue()) {
            final GraphNode keyChangingNodeChild =
                findParentNodeMatching(repartitionNodeToBeReplaced, gn -> gn.parentNodes().contains(keyChangingNode));
            if (keyChangingNodeChild == null) {
                throw new StreamsException(String.format("Found a null keyChangingChild node for %s", repartitionNodeToBeReplaced));
            }
            LOG.debug("Found the child node of the key changer {} from the repartition {}.", keyChangingNodeChild, repartitionNodeToBeReplaced);
            // need to add children of key-changing node as children of optimized repartition
            // in order to process records from re-partitioning
            optimizedSingleRepartition.addChild(keyChangingNodeChild);
            LOG.debug("Removing {} from {} children {}", keyChangingNodeChild, keyChangingNode, keyChangingNode.children());
            // now remove children from key-changing node
            keyChangingNode.removeChild(keyChangingNodeChild);
            // now need to get children of repartition node so we can remove repartition node
            final Collection<GraphNode> repartitionNodeToBeReplacedChildren = repartitionNodeToBeReplaced.children();
            final Collection<GraphNode> parentsOfRepartitionNodeToBeReplaced = repartitionNodeToBeReplaced.parentNodes();
            for (final GraphNode repartitionNodeToBeReplacedChild : repartitionNodeToBeReplacedChildren) {
                for (final GraphNode parentNode : parentsOfRepartitionNodeToBeReplaced) {
                    parentNode.addChild(repartitionNodeToBeReplacedChild);
                }
            }
            for (final GraphNode parentNode : parentsOfRepartitionNodeToBeReplaced) {
                parentNode.removeChild(repartitionNodeToBeReplaced);
            }
            repartitionNodeToBeReplaced.clearChildren();
            // if replaced repartition node is part of any copartition group,
            // we need to update it with the new node name so that co-partitioning won't break.
            internalTopologyBuilder.maybeUpdateCopartitionSourceGroups(repartitionNodeToBeReplaced.nodeName(), optimizedSingleRepartition.nodeName());
            LOG.debug("Updated node {} children {}", optimizedSingleRepartition, optimizedSingleRepartition.children());
        }
        keyChangingNode.addChild(optimizedSingleRepartition);
        entryIterator.remove();
    }
}
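For context, here is a hedged sketch of a topology that exercises this optimization (topic, class, and variable names are illustrative, not taken from the snippet above): a key-changing selectKey followed by two aggregations would normally create one repartition topic per aggregation, and with optimization enabled the logic above replaces them with the single optimizedSingleRepartition node.

import java.util.Properties;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.KStream;

public class RepartitionMergeExample {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        // opt in to topology optimization, which enables the repartition merging above
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

        final StreamsBuilder builder = new StreamsBuilder();
        // key-changing operation: everything stateful downstream needs repartitioning
        final KStream<String, String> rekeyed = builder.<String, String>stream("input")
            .selectKey((key, value) -> value);
        // two stateful operations on the rekeyed stream would normally create
        // two repartition topics; with optimization they share a single one
        rekeyed.groupByKey().count();
        rekeyed.groupByKey().reduce((left, right) -> left + right);

        // optimization is applied while building the topology
        final Topology topology = builder.build(props);
        System.out.println(topology.describe());
    }
}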
use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class ChangedSerializer method serialize.
/**
 * @throws StreamsException if both old and new values of data are null, or if
 * both values are not null
 */
@Override
public byte[] serialize(final String topic, final Headers headers, final Change<T> data) {
    final byte[] serializedKey;
    // only one of the old / new values would be not null
    if (data.newValue != null) {
        if (data.oldValue != null) {
            throw new StreamsException("Both old and new values are not null (" + data.oldValue + " : " + data.newValue + ") in ChangeSerializer, which is not allowed.");
        }
        serializedKey = inner.serialize(topic, headers, data.newValue);
    } else {
        if (data.oldValue == null) {
            throw new StreamsException("Both old and new values are null in ChangeSerializer, which is not allowed.");
        }
        serializedKey = inner.serialize(topic, headers, data.oldValue);
    }
    final ByteBuffer buf = ByteBuffer.allocate(serializedKey.length + NEWFLAG_SIZE);
    buf.put(serializedKey);
    buf.put((byte) (data.newValue != null ? 1 : 0));
    return buf.array();
}
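A hedged companion sketch (not the actual ChangedDeserializer source) of how the reading side could use the flag byte appended by serialize(): innerDeserializer is an assumed Deserializer<T> matching the serializer's inner, and the flag is assumed to be the single trailing byte written above.

private Change<T> deserializeChange(final String topic, final Headers headers, final byte[] bytes) {
    // strip the one-byte flag appended after the serialized value
    final byte[] payload = Arrays.copyOfRange(bytes, 0, bytes.length - 1);
    final T value = innerDeserializer.deserialize(topic, headers, payload);
    return bytes[bytes.length - 1] == (byte) 1
        ? new Change<>(value, null)   // flag 1: payload was the new value
        : new Change<>(null, value);  // flag 0: payload was the old value
}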
use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class GlobalStreamThreadTest method shouldThrowStreamsExceptionOnStartupIfThereIsAStreamsException.
@Test
public void shouldThrowStreamsExceptionOnStartupIfThereIsAStreamsException() throws Exception {
    // should throw as the MockConsumer hasn't been configured and there are no
    // partitions available
    final StateStore globalStore = builder.globalStateStores().get(GLOBAL_STORE_NAME);
    try {
        globalStreamThread.start();
        fail("Should have thrown StreamsException if start up failed");
    } catch (final StreamsException e) {
        // ok
    }
    globalStreamThread.join();
    assertThat(globalStore.isOpen(), is(false));
    assertFalse(globalStreamThread.stillRunning());
}
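The same startup check could also be expressed with JUnit's assertThrows; a hedged one-line sketch, equivalent in intent to the try/catch above:

final StreamsException startupException =
    assertThrows(StreamsException.class, globalStreamThread::start);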
use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class HighAvailabilityStreamsPartitionAssignorTest method shouldReturnAllActiveTasksToPreviousOwnerRegardlessOfBalanceAndTriggerRebalanceIfEndOffsetFetchFailsAndHighAvailabilityEnabled.
@Test
public void shouldReturnAllActiveTasksToPreviousOwnerRegardlessOfBalanceAndTriggerRebalanceIfEndOffsetFetchFailsAndHighAvailabilityEnabled() {
    final long rebalanceInterval = 5 * 60 * 1000L;
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    createMockTaskManager(allTasks);
    adminClient = EasyMock.createMock(AdminClient.class);
    expect(adminClient.listOffsets(anyObject())).andThrow(new StreamsException("Should be handled"));
    configurePartitionAssignorWith(singletonMap(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, rebalanceInterval));
    final String firstConsumer = "consumer1";
    final String newConsumer = "consumer2";
    subscriptions.put(firstConsumer, new Subscription(singletonList("source1"), getInfo(UUID_1, allTasks).encode()));
    subscriptions.put(newConsumer, new Subscription(singletonList("source1"), getInfo(UUID_2, EMPTY_TASKS).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    final AssignmentInfo firstConsumerUserData = AssignmentInfo.decode(assignments.get(firstConsumer).userData());
    final List<TaskId> firstConsumerActiveTasks = firstConsumerUserData.activeTasks();
    final AssignmentInfo newConsumerUserData = AssignmentInfo.decode(assignments.get(newConsumer).userData());
    final List<TaskId> newConsumerActiveTasks = newConsumerUserData.activeTasks();
    // The tasks were returned to their prior owner
    final ArrayList<TaskId> sortedExpectedTasks = new ArrayList<>(allTasks);
    Collections.sort(sortedExpectedTasks);
    assertThat(firstConsumerActiveTasks, equalTo(sortedExpectedTasks));
    assertThat(newConsumerActiveTasks, empty());
    // There is a rebalance scheduled
    assertThat(time.milliseconds() + rebalanceInterval, anyOf(is(firstConsumerUserData.nextRebalanceMs()), is(newConsumerUserData.nextRebalanceMs())));
}
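As a hedged aside (the value below is illustrative, mirroring the interval used in the test), the probing rebalance interval that the test overrides is an ordinary StreamsConfig setting, so an application would configure it the same way:

final Properties props = new Properties();
// schedule follow-up probing rebalances every five minutes
props.put(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, 5 * 60 * 1000L);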
use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
the class RecordCollectorTest method shouldThrowStreamsExceptionOnSubsequentCloseIfASendFailsWithDefaultExceptionHandler.
@Test
public void shouldThrowStreamsExceptionOnSubsequentCloseIfASendFailsWithDefaultExceptionHandler() {
    final KafkaException exception = new KafkaException("KABOOM!");
    final RecordCollector collector = new RecordCollectorImpl(logContext, taskId, getExceptionalStreamsProducerOnSend(exception), productionExceptionHandler, streamsMetrics);
    collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    final StreamsException thrown = assertThrows(StreamsException.class, collector::closeClean);
    assertEquals(exception, thrown.getCause());
    assertThat(thrown.getMessage(), equalTo("Error encountered sending record to topic topic for task 0_0 due to:" + "\norg.apache.kafka.common.KafkaException: KABOOM!" + "\nException handler choose to FAIL the processing, no more records would be sent."));
}
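For contrast, a hedged sketch of a handler that would avoid the failure asserted above (the class name is illustrative): registering a ProductionExceptionHandler that returns CONTINUE via StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG makes the collector log and drop the failed record instead of rethrowing a StreamsException on close.

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Illustrative handler: swallow send errors instead of failing the task.
public class ContinueOnSendErrorHandler implements ProductionExceptionHandler {
    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // no configuration needed
    }
}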