
Example 16 with BatchItem

Use of org.zalando.nakadi.domain.BatchItem in project nakadi by zalando.

From class KafkaTopicRepositoryTest, method whenPostEventOverflowsBufferThenUpdateItemStatus.

@Test
public void whenPostEventOverflowsBufferThenUpdateItemStatus() throws Exception {
    final BatchItem item = new BatchItem(
            "{}",
            BatchItem.EmptyInjectionConfiguration.build(1, true),
            new BatchItem.InjectionConfiguration[BatchItem.Injection.values().length],
            Collections.emptyList());
    item.setPartition("1");
    final List<BatchItem> batch = new ArrayList<>();
    batch.add(item);
    when(kafkaProducer.partitionsFor(EXPECTED_PRODUCER_RECORD.topic())).thenReturn(ImmutableList.of(
            new PartitionInfo(EXPECTED_PRODUCER_RECORD.topic(), 1, new Node(1, "host", 9091), null, null)));
    Mockito.doThrow(BufferExhaustedException.class).when(kafkaProducer).send(any(), any());
    try {
        kafkaTopicRepository.syncPostBatch(EXPECTED_PRODUCER_RECORD.topic(), batch);
        fail();
    } catch (final EventPublishingException e) {
        assertThat(item.getResponse().getPublishingStatus(), equalTo(EventPublishingStatus.FAILED));
        assertThat(item.getResponse().getDetail(), equalTo("internal error"));
    }
}
Also used : Node(org.apache.kafka.common.Node) BatchItem(org.zalando.nakadi.domain.BatchItem) ArrayList(java.util.ArrayList) PartitionInfo(org.apache.kafka.common.PartitionInfo) EventPublishingException(org.zalando.nakadi.exceptions.EventPublishingException) Test(org.junit.Test)
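
The BatchItem construction above is repeated verbatim in the next two Kafka tests. A small test-side helper along the following lines (hypothetical, not part of the nakadi test suite) would factor it out; it relies only on the constructor arguments already shown in this example.

// Hypothetical helper: builds a BatchItem with the empty injection configuration
// used throughout these tests and assigns it to the given partition.
private static BatchItem batchItemForPartition(final String event, final String partition) {
    final BatchItem item = new BatchItem(
            event,
            BatchItem.EmptyInjectionConfiguration.build(1, true),
            new BatchItem.InjectionConfiguration[BatchItem.Injection.values().length],
            Collections.emptyList());
    item.setPartition(partition);
    return item;
}

With such a helper, the test above could simply call batchItemForPartition("{}", "1").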

Example 17 with BatchItem

Use of org.zalando.nakadi.domain.BatchItem in project nakadi by zalando.

From class KafkaTopicRepositoryTest, method whenKafkaPublishTimeoutThenCircuitIsOpened.

@Test
public void whenKafkaPublishTimeoutThenCircuitIsOpened() throws Exception {
    when(nakadiSettings.getKafkaSendTimeoutMs()).thenReturn(1000L);
    when(kafkaProducer.partitionsFor(EXPECTED_PRODUCER_RECORD.topic())).thenReturn(ImmutableList.of(
            new PartitionInfo(EXPECTED_PRODUCER_RECORD.topic(), 1, new Node(1, "host", 9091), null, null)));
    when(kafkaProducer.send(any(), any())).thenAnswer(invocation -> {
        final Callback callback = (Callback) invocation.getArguments()[1];
        callback.onCompletion(null, new TimeoutException());
        return null;
    });
    final List<BatchItem> batches = new LinkedList<>();
    for (int i = 0; i < 1000; i++) {
        try {
            final BatchItem batchItem = new BatchItem(
                    "{}",
                    BatchItem.EmptyInjectionConfiguration.build(1, true),
                    new BatchItem.InjectionConfiguration[BatchItem.Injection.values().length],
                    Collections.emptyList());
            batchItem.setPartition("1");
            batches.add(batchItem);
            kafkaTopicRepository.syncPostBatch(EXPECTED_PRODUCER_RECORD.topic(), ImmutableList.of(batchItem));
            fail();
        } catch (final EventPublishingException e) {
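            // Expected: every publish attempt fails; once the circuit opens, items are failed as "short circuited".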
        }
    }
    Assert.assertTrue(batches.stream()
            .filter(item -> item.getResponse().getPublishingStatus() == EventPublishingStatus.FAILED
                    && item.getResponse().getDetail().equals("short circuited"))
            .count() >= 1);
}
Also used : Callback(org.apache.kafka.clients.producer.Callback) Node(org.apache.kafka.common.Node) BatchItem(org.zalando.nakadi.domain.BatchItem) PartitionInfo(org.apache.kafka.common.PartitionInfo) EventPublishingException(org.zalando.nakadi.exceptions.EventPublishingException) LinkedList(java.util.LinkedList) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Test(org.junit.Test)
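
The final assertion counts matching items with filter(...).count() >= 1; an equivalent and slightly more direct check (a sketch, not a change taken from the nakadi sources) uses anyMatch:

// At least one item must have been rejected by the open circuit breaker.
Assert.assertTrue(batches.stream().anyMatch(item ->
        item.getResponse().getPublishingStatus() == EventPublishingStatus.FAILED
                && "short circuited".equals(item.getResponse().getDetail())));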

Example 18 with BatchItem

Use of org.zalando.nakadi.domain.BatchItem in project nakadi by zalando.

From class KafkaTopicRepositoryTest, method whenKafkaPublishCallbackWithExceptionThenEventPublishingException.

@Test
public void whenKafkaPublishCallbackWithExceptionThenEventPublishingException() throws Exception {
    final BatchItem firstItem = new BatchItem(
            "{}",
            BatchItem.EmptyInjectionConfiguration.build(1, true),
            new BatchItem.InjectionConfiguration[BatchItem.Injection.values().length],
            Collections.emptyList());
    firstItem.setPartition("1");
    final BatchItem secondItem = new BatchItem(
            "{}",
            BatchItem.EmptyInjectionConfiguration.build(1, true),
            new BatchItem.InjectionConfiguration[BatchItem.Injection.values().length],
            Collections.emptyList());
    secondItem.setPartition("2");
    final List<BatchItem> batch = ImmutableList.of(firstItem, secondItem);
    when(kafkaProducer.partitionsFor(EXPECTED_PRODUCER_RECORD.topic())).thenReturn(ImmutableList.of(
            new PartitionInfo(EXPECTED_PRODUCER_RECORD.topic(), 1, new Node(1, "host", 9091), null, null),
            new PartitionInfo(EXPECTED_PRODUCER_RECORD.topic(), 2, new Node(1, "host", 9091), null, null)));
    when(kafkaProducer.send(any(), any())).thenAnswer(invocation -> {
        final ProducerRecord record = (ProducerRecord) invocation.getArguments()[0];
        final Callback callback = (Callback) invocation.getArguments()[1];
        if (record.partition() == 2) {
            // return exception only for second event
            callback.onCompletion(null, new Exception());
        } else {
            callback.onCompletion(null, null);
        }
        return null;
    });
    try {
        kafkaTopicRepository.syncPostBatch(EXPECTED_PRODUCER_RECORD.topic(), batch);
        fail();
    } catch (final EventPublishingException e) {
        assertThat(firstItem.getResponse().getPublishingStatus(), equalTo(EventPublishingStatus.SUBMITTED));
        assertThat(firstItem.getResponse().getDetail(), equalTo(""));
        assertThat(secondItem.getResponse().getPublishingStatus(), equalTo(EventPublishingStatus.FAILED));
        assertThat(secondItem.getResponse().getDetail(), equalTo("internal error"));
    }
}
Also used : Callback(org.apache.kafka.clients.producer.Callback) Node(org.apache.kafka.common.Node) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) BatchItem(org.zalando.nakadi.domain.BatchItem) PartitionInfo(org.apache.kafka.common.PartitionInfo) EventPublishingException(org.zalando.nakadi.exceptions.EventPublishingException) KafkaException(org.apache.kafka.common.KafkaException) NakadiException(org.zalando.nakadi.exceptions.NakadiException) InvalidCursorException(org.zalando.nakadi.exceptions.InvalidCursorException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) BufferExhaustedException(org.apache.kafka.clients.producer.BufferExhaustedException) Test(org.junit.Test)
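
Examples 17 and 18 both stub kafkaProducer.send(...) with an Answer that invokes the producer Callback by hand. A reusable helper in that style (hypothetical, not part of the nakadi tests; assumes org.mockito.stubbing.Answer, org.apache.kafka.clients.producer.RecordMetadata and java.util.concurrent.Future are imported) keeps the wiring in one place:

// Completes the Callback passed as the second argument of KafkaProducer#send
// with the given error; pass null to simulate a successful completion.
private static Answer<Future<RecordMetadata>> completeSendWith(final Exception error) {
    return invocation -> {
        final Callback callback = (Callback) invocation.getArguments()[1];
        callback.onCompletion(null, error);
        return null;
    };
}

The stub from Example 17 then reads: when(kafkaProducer.send(any(), any())).thenAnswer(completeSendWith(new TimeoutException()));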

Example 19 with BatchItem

Use of org.zalando.nakadi.domain.BatchItem in project nakadi by zalando.

From class EventPublisherTest, method createStringFromBatchItems.

private String createStringFromBatchItems(final List<BatchItem> batch) {
    final StringBuilder sb = new StringBuilder();
    sb.append("[");
    for (final BatchItem item : batch) {
        sb.append(item.getEvent().toString());
        sb.append(",");
    }
    sb.setCharAt(sb.length() - 1, ']');
    return sb.toString();
}
Also used : TestUtils.createBatchItem(org.zalando.nakadi.utils.TestUtils.createBatchItem) BatchItem(org.zalando.nakadi.domain.BatchItem)
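
The same string can be produced with Collectors.joining; a stream-based sketch is shown below (it assumes, as the helper above does, that getEvent() returns an object whose toString() is the event's JSON). One behavioral difference: for an empty batch the original helper overwrites the opening bracket and returns "]", whereas the joining variant returns "[]".

// Joins the events' JSON representations into a JSON array string.
private String createStringFromBatchItems(final List<BatchItem> batch) {
    return batch.stream()
            .map(item -> item.getEvent().toString())
            .collect(Collectors.joining(",", "[", "]"));
}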

Example 20 with BatchItem

Use of org.zalando.nakadi.domain.BatchItem in project nakadi by zalando.

From class KafkaTopicRepository, method syncPostBatch.

@Override
public void syncPostBatch(final String topicId, final List<BatchItem> batch) throws EventPublishingException {
    final Producer<String, String> producer = kafkaFactory.takeProducer();
    try {
        final Map<String, String> partitionToBroker = producer.partitionsFor(topicId).stream()
                .collect(Collectors.toMap(
                        p -> String.valueOf(p.partition()),
                        p -> String.valueOf(p.leader().id())));
        batch.forEach(item -> {
            Preconditions.checkNotNull(item.getPartition(), "BatchItem partition can't be null at the moment of publishing!");
            item.setBrokerId(partitionToBroker.get(item.getPartition()));
        });
        int shortCircuited = 0;
        final Map<BatchItem, CompletableFuture<Exception>> sendFutures = new HashMap<>();
        for (final BatchItem item : batch) {
            item.setStep(EventPublishingStep.PUBLISHING);
            final HystrixKafkaCircuitBreaker circuitBreaker = circuitBreakers.computeIfAbsent(item.getBrokerId(), brokerId -> new HystrixKafkaCircuitBreaker(brokerId));
            if (circuitBreaker.allowRequest()) {
                sendFutures.put(item, publishItem(producer, topicId, item, circuitBreaker));
            } else {
                shortCircuited++;
                item.updateStatusAndDetail(EventPublishingStatus.FAILED, "short circuited");
            }
        }
        if (shortCircuited > 0) {
            LOG.warn("Short circuiting request to Kafka {} time(s) due to timeout for topic {}", shortCircuited, topicId);
        }
        final CompletableFuture<Void> multiFuture = CompletableFuture.allOf(
                sendFutures.values().toArray(new CompletableFuture<?>[sendFutures.size()]));
        multiFuture.get(createSendTimeout(), TimeUnit.MILLISECONDS);
        // Now let's check for errors
        final Optional<Exception> needReset = sendFutures.entrySet().stream()
                .filter(entry -> isExceptionShouldLeadToReset(entry.getValue().getNow(null)))
                .map(entry -> entry.getValue().getNow(null))
                .findAny();
        if (needReset.isPresent()) {
            LOG.info("Terminating producer while publishing to topic {} because of unrecoverable exception", topicId, needReset.get());
            kafkaFactory.terminateProducer(producer);
        }
    } catch (final TimeoutException ex) {
        failUnpublished(batch, "timed out");
        throw new EventPublishingException("Error publishing message to kafka", ex);
    } catch (final ExecutionException ex) {
        failUnpublished(batch, "internal error");
        throw new EventPublishingException("Error publishing message to kafka", ex);
    } catch (final InterruptedException ex) {
        Thread.currentThread().interrupt();
        failUnpublished(batch, "interrupted");
        throw new EventPublishingException("Error publishing message to kafka", ex);
    } finally {
        kafkaFactory.releaseProducer(producer);
    }
    final boolean atLeastOneFailed = batch.stream()
            .anyMatch(item -> item.getResponse().getPublishingStatus() == EventPublishingStatus.FAILED);
    if (atLeastOneFailed) {
        failUnpublished(batch, "internal error");
        throw new EventPublishingException("Error publishing message to kafka");
    }
}
Also used : EventPublishingException(org.zalando.nakadi.exceptions.EventPublishingException) BatchItem(org.zalando.nakadi.domain.BatchItem) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException) NotLeaderForPartitionException(org.apache.kafka.common.errors.NotLeaderForPartitionException) TopicRepositoryException(org.zalando.nakadi.exceptions.runtime.TopicRepositoryException) ServiceUnavailableException(org.zalando.nakadi.exceptions.ServiceUnavailableException) InvalidCursorException(org.zalando.nakadi.exceptions.InvalidCursorException) TopicDeletionException(org.zalando.nakadi.exceptions.TopicDeletionException) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) NetworkException(org.apache.kafka.common.errors.NetworkException) TopicCreationException(org.zalando.nakadi.exceptions.TopicCreationException) TopicConfigException(org.zalando.nakadi.exceptions.runtime.TopicConfigException) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) InterruptException(org.apache.kafka.common.errors.InterruptException)
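
Seen from the caller's side, syncPostBatch records the outcome of each item on the BatchItem itself and signals an overall failure with EventPublishingException, which is what the tests in Examples 16-18 assert. A minimal usage sketch follows (the configured topicRepository instance, the LOG logger and the topic id are assumptions for illustration; the BatchItem API is the one shown above):

// Minimal sketch, assuming a configured KafkaTopicRepository (topicRepository)
// and an existing topic id.
final BatchItem item = new BatchItem(
        "{}",
        BatchItem.EmptyInjectionConfiguration.build(1, true),
        new BatchItem.InjectionConfiguration[BatchItem.Injection.values().length],
        Collections.emptyList());
item.setPartition("0");
try {
    topicRepository.syncPostBatch("example-topic", ImmutableList.of(item));
} catch (final EventPublishingException e) {
    // Per-item details end up on the response, e.g. FAILED / "short circuited"
    // or FAILED / "internal error".
    LOG.warn("publishing failed: {} ({})",
            item.getResponse().getPublishingStatus(), item.getResponse().getDetail());
}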

Aggregations

BatchItem (org.zalando.nakadi.domain.BatchItem): 23
Test (org.junit.Test): 16
EventType (org.zalando.nakadi.domain.EventType): 12
TestUtils.createBatchItem (org.zalando.nakadi.utils.TestUtils.createBatchItem): 12
TestUtils.buildDefaultEventType (org.zalando.nakadi.utils.TestUtils.buildDefaultEventType): 11
JSONObject (org.json.JSONObject): 10
ArrayList (java.util.ArrayList): 8
EventPublishingException (org.zalando.nakadi.exceptions.EventPublishingException): 7
PartitionInfo (org.apache.kafka.common.PartitionInfo): 6
Collections (java.util.Collections): 3
List (java.util.List): 3
Set (java.util.Set): 3
TimeoutException (java.util.concurrent.TimeoutException): 3
Collectors (java.util.stream.Collectors): 3
Collectors.toList (java.util.stream.Collectors.toList): 3
Consumer (org.apache.kafka.clients.consumer.Consumer): 3
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 3
Node (org.apache.kafka.common.Node): 3
Matchers.isEmptyString (org.hamcrest.Matchers.isEmptyString): 3
InvalidCursorException (org.zalando.nakadi.exceptions.InvalidCursorException): 3