
Example 56 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

Class InternalStreamsBuilder, method maybeOptimizeRepartitionOperations.

private void maybeOptimizeRepartitionOperations() {
    maybeUpdateKeyChangingRepartitionNodeMap();
    final Iterator<Entry<GraphNode, LinkedHashSet<OptimizableRepartitionNode<?, ?>>>> entryIterator =
        keyChangingOperationsToOptimizableRepartitionNodes.entrySet().iterator();
    while (entryIterator.hasNext()) {
        final Map.Entry<GraphNode, LinkedHashSet<OptimizableRepartitionNode<?, ?>>> entry = entryIterator.next();
        final GraphNode keyChangingNode = entry.getKey();
        if (entry.getValue().isEmpty()) {
            continue;
        }
        final GroupedInternal<?, ?> groupedInternal = new GroupedInternal<>(getRepartitionSerdes(entry.getValue()));
        final String repartitionTopicName = getFirstRepartitionTopicName(entry.getValue());
        // passing in the name of the first repartition topic, re-used to create the optimized repartition topic
        final GraphNode optimizedSingleRepartition = createRepartitionNode(repartitionTopicName, groupedInternal.keySerde(), groupedInternal.valueSerde());
        // re-use parent buildPriority to make sure the single repartition graph node is evaluated before downstream nodes
        optimizedSingleRepartition.setBuildPriority(keyChangingNode.buildPriority());
        for (final OptimizableRepartitionNode<?, ?> repartitionNodeToBeReplaced : entry.getValue()) {
            final GraphNode keyChangingNodeChild = findParentNodeMatching(repartitionNodeToBeReplaced, gn -> gn.parentNodes().contains(keyChangingNode));
            if (keyChangingNodeChild == null) {
                throw new StreamsException(String.format("Found a null keyChangingChild node for %s", repartitionNodeToBeReplaced));
            }
            LOG.debug("Found the child node of the key changer {} from the repartition {}.", keyChangingNodeChild, repartitionNodeToBeReplaced);
            // need to add children of key-changing node as children of optimized repartition
            // in order to process records from re-partitioning
            optimizedSingleRepartition.addChild(keyChangingNodeChild);
            LOG.debug("Removing {} from {}  children {}", keyChangingNodeChild, keyChangingNode, keyChangingNode.children());
            // now remove children from key-changing node
            keyChangingNode.removeChild(keyChangingNodeChild);
            // now need to get children of repartition node so we can remove repartition node
            final Collection<GraphNode> repartitionNodeToBeReplacedChildren = repartitionNodeToBeReplaced.children();
            final Collection<GraphNode> parentsOfRepartitionNodeToBeReplaced = repartitionNodeToBeReplaced.parentNodes();
            for (final GraphNode repartitionNodeToBeReplacedChild : repartitionNodeToBeReplacedChildren) {
                for (final GraphNode parentNode : parentsOfRepartitionNodeToBeReplaced) {
                    parentNode.addChild(repartitionNodeToBeReplacedChild);
                }
            }
            for (final GraphNode parentNode : parentsOfRepartitionNodeToBeReplaced) {
                parentNode.removeChild(repartitionNodeToBeReplaced);
            }
            repartitionNodeToBeReplaced.clearChildren();
            // if replaced repartition node is part of any copartition group,
            // we need to update it with the new node name so that co-partitioning won't break.
            internalTopologyBuilder.maybeUpdateCopartitionSourceGroups(repartitionNodeToBeReplaced.nodeName(), optimizedSingleRepartition.nodeName());
            LOG.debug("Updated node {} children {}", optimizedSingleRepartition, optimizedSingleRepartition.children());
        }
        keyChangingNode.addChild(optimizedSingleRepartition);
        entryIterator.remove();
    }
}
Also used: LinkedHashSet(java.util.LinkedHashSet) StreamsException(org.apache.kafka.streams.errors.StreamsException) GraphNode(org.apache.kafka.streams.kstream.internals.graph.GraphNode) OptimizableRepartitionNode(org.apache.kafka.streams.kstream.internals.graph.OptimizableRepartitionNode) Entry(java.util.Map.Entry) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)
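This optimization only runs when topology optimization is enabled. Below is a minimal sketch of a DSL topology that exercises this path; the topic names and serdes are illustrative. With optimization on, the two groupByKey operations share a single repartition topic instead of creating one each.

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;

public class RepartitionOptimizationSketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> source = builder.stream("input-topic");
        // selectKey is a key-changing operation; without optimization each
        // downstream aggregation inserts its own repartition topic
        final KStream<String, String> rekeyed = source.selectKey((k, v) -> v.substring(0, 1));
        rekeyed.groupByKey(Grouped.with(Serdes.String(), Serdes.String())).count();
        rekeyed.groupByKey(Grouped.with(Serdes.String(), Serdes.String())).reduce((v1, v2) -> v1 + v2);

        final Properties props = new Properties();
        // enables the optimization pass that includes maybeOptimizeRepartitionOperations()
        props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
        // build(props) applies the optimization before the topology is described
        System.out.println(builder.build(props).describe());
    }
}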

Example 57 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

Class ChangedSerializer, method serialize.

/**
 * @throws StreamsException if both old and new values of data are null, or if
 * both values are not null
 */
@Override
public byte[] serialize(final String topic, final Headers headers, final Change<T> data) {
    final byte[] serializedKey;
    // only one of the old / new values would be not null
    if (data.newValue != null) {
        if (data.oldValue != null) {
            throw new StreamsException("Both old and new values are not null (" + data.oldValue + " : " + data.newValue + ") in ChangeSerializer, which is not allowed.");
        }
        serializedKey = inner.serialize(topic, headers, data.newValue);
    } else {
        if (data.oldValue == null) {
            throw new StreamsException("Both old and new values are null in ChangeSerializer, which is not allowed.");
        }
        serializedKey = inner.serialize(topic, headers, data.oldValue);
    }
    final ByteBuffer buf = ByteBuffer.allocate(serializedKey.length + NEWFLAG_SIZE);
    buf.put(serializedKey);
    buf.put((byte) (data.newValue != null ? 1 : 0));
    return buf.array();
}
Also used: StreamsException(org.apache.kafka.streams.errors.StreamsException) ByteBuffer(java.nio.ByteBuffer)
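The resulting byte layout is the inner-serialized value followed by a single flag byte: 1 if the new value was encoded, 0 if the old one was. A minimal sketch of reading that layout back; the ChangeFormat class and its helpers are illustrative, not Kafka internals:

import java.util.Arrays;

public final class ChangeFormat {
    private ChangeFormat() {
    }

    // the flag occupies the last byte of the buffer (NEWFLAG_SIZE == 1)
    public static boolean isNewValue(final byte[] bytes) {
        return bytes[bytes.length - 1] == (byte) 1;
    }

    // everything before the flag byte is the inner serializer's output
    public static byte[] innerPayload(final byte[] bytes) {
        return Arrays.copyOfRange(bytes, 0, bytes.length - 1);
    }
}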

Example 58 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

Class GlobalStreamThreadTest, method shouldThrowStreamsExceptionOnStartupIfThereIsAStreamsException.

@Test
public void shouldThrowStreamsExceptionOnStartupIfThereIsAStreamsException() throws Exception {
    // should throw as the MockConsumer hasn't been configured and there are no
    // partitions available
    final StateStore globalStore = builder.globalStateStores().get(GLOBAL_STORE_NAME);
    try {
        globalStreamThread.start();
        fail("Should have thrown StreamsException if start up failed");
    } catch (final StreamsException e) {
    // ok
    }
    globalStreamThread.join();
    assertThat(globalStore.isOpen(), is(false));
    assertFalse(globalStreamThread.stillRunning());
}
Also used: StreamsException(org.apache.kafka.streams.errors.StreamsException) StateStore(org.apache.kafka.streams.processor.StateStore) Test(org.junit.Test)
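The try/fail/catch idiom above can be collapsed with assertThrows, available in org.junit.Assert since JUnit 4.13. A sketch reusing the test's fields:

// equivalent assertion with org.junit.Assert.assertThrows (JUnit 4.13+)
assertThrows(StreamsException.class, globalStreamThread::start);
globalStreamThread.join();
assertThat(globalStore.isOpen(), is(false));
assertFalse(globalStreamThread.stillRunning());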

Example 59 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

Class HighAvailabilityStreamsPartitionAssignorTest, method shouldReturnAllActiveTasksToPreviousOwnerRegardlessOfBalanceAndTriggerRebalanceIfEndOffsetFetchFailsAndHighAvailabilityEnabled.

@Test
public void shouldReturnAllActiveTasksToPreviousOwnerRegardlessOfBalanceAndTriggerRebalanceIfEndOffsetFetchFailsAndHighAvailabilityEnabled() {
    final long rebalanceInterval = 5 * 60 * 1000L;
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    createMockTaskManager(allTasks);
    adminClient = EasyMock.createMock(AdminClient.class);
    expect(adminClient.listOffsets(anyObject())).andThrow(new StreamsException("Should be handled"));
    configurePartitionAssignorWith(singletonMap(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, rebalanceInterval));
    final String firstConsumer = "consumer1";
    final String newConsumer = "consumer2";
    subscriptions.put(firstConsumer, new Subscription(singletonList("source1"), getInfo(UUID_1, allTasks).encode()));
    subscriptions.put(newConsumer, new Subscription(singletonList("source1"), getInfo(UUID_2, EMPTY_TASKS).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    final AssignmentInfo firstConsumerUserData = AssignmentInfo.decode(assignments.get(firstConsumer).userData());
    final List<TaskId> firstConsumerActiveTasks = firstConsumerUserData.activeTasks();
    final AssignmentInfo newConsumerUserData = AssignmentInfo.decode(assignments.get(newConsumer).userData());
    final List<TaskId> newConsumerActiveTasks = newConsumerUserData.activeTasks();
    // The tasks were returned to their prior owner
    final ArrayList<TaskId> sortedExpectedTasks = new ArrayList<>(allTasks);
    Collections.sort(sortedExpectedTasks);
    assertThat(firstConsumerActiveTasks, equalTo(sortedExpectedTasks));
    assertThat(newConsumerActiveTasks, empty());
    // There is a rebalance scheduled
    assertThat(time.milliseconds() + rebalanceInterval, anyOf(is(firstConsumerUserData.nextRebalanceMs()), is(newConsumerUserData.nextRebalanceMs())));
}
Also used: TaskId(org.apache.kafka.streams.processor.TaskId) StreamsException(org.apache.kafka.streams.errors.StreamsException) ArrayList(java.util.ArrayList) Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) AdminClient(org.apache.kafka.clients.admin.AdminClient) Test(org.junit.Test)
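StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, pinned to five minutes in this test, controls how often the assignor schedules follow-up (probing) rebalances to check warm-up task progress; its default is ten minutes. A minimal sketch of setting it in application code, with an illustrative application id and bootstrap address:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "ha-demo-app");        // illustrative
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // illustrative
// schedule probing rebalances every 5 minutes instead of the 10-minute default
props.put(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, 5 * 60 * 1000L);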

Example 60 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

Class RecordCollectorTest, method shouldThrowStreamsExceptionOnSubsequentCloseIfASendFailsWithDefaultExceptionHandler.

@Test
public void shouldThrowStreamsExceptionOnSubsequentCloseIfASendFailsWithDefaultExceptionHandler() {
    final KafkaException exception = new KafkaException("KABOOM!");
    final RecordCollector collector = new RecordCollectorImpl(logContext, taskId, getExceptionalStreamsProducerOnSend(exception), productionExceptionHandler, streamsMetrics);
    collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    final StreamsException thrown = assertThrows(StreamsException.class, collector::closeClean);
    assertEquals(exception, thrown.getCause());
    assertThat(thrown.getMessage(), equalTo("Error encountered sending record to topic topic for task 0_0 due to:" + "\norg.apache.kafka.common.KafkaException: KABOOM!" + "\nException handler choose to FAIL the processing, no more records would be sent."));
}
Also used: StreamsException(org.apache.kafka.streams.errors.StreamsException) KafkaException(org.apache.kafka.common.KafkaException) Test(org.junit.Test)
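The asserted message comes from the default production exception handler, which FAILs the task on a send error. An application that prefers to drop the failed record and keep processing can register its own handler; a sketch against the public ProductionExceptionHandler interface (the class name is illustrative):

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

public class ContinueOnSendErrorHandler implements ProductionExceptionHandler {
    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // CONTINUE drops the failed record; FAIL (the default behavior) kills the task
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // no handler-specific configuration in this sketch
    }
}

It would be registered via props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG, ContinueOnSendErrorHandler.class).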

Aggregations

StreamsException (org.apache.kafka.streams.errors.StreamsException): 186 usages
Test (org.junit.Test): 90 usages
KafkaException (org.apache.kafka.common.KafkaException): 41 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 38 usages
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 36 usages
HashMap (java.util.HashMap): 27 usages
Map (java.util.Map): 25 usages
HashSet (java.util.HashSet): 18 usages
Properties (java.util.Properties): 17 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 14 usages
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 13 usages
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 12 usages
ArrayList (java.util.ArrayList): 11 usages
ExecutionException (java.util.concurrent.ExecutionException): 11 usages
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException): 11 usages
IOException (java.io.IOException): 10 usages
Set (java.util.Set): 10 usages
LogContext (org.apache.kafka.common.utils.LogContext): 10 usages
MockTime (org.apache.kafka.common.utils.MockTime): 10 usages
StateStore (org.apache.kafka.streams.processor.StateStore): 10 usages