
Example 61 with KafkaException

Use of org.apache.kafka.common.KafkaException in the Apache Kafka project.

From the class KafkaProducerTest, the method testConstructorFailureCloseResource:

@Test
public void testConstructorFailureCloseResource() {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "some.invalid.hostname.foo.bar.local:9999");
    props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    try (KafkaProducer<byte[], byte[]> ignored = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) {
        fail("should have caught an exception and returned");
    } catch (KafkaException e) {
        assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka producer", e.getMessage());
    }
}
Also used : MockMetricsReporter(org.apache.kafka.test.MockMetricsReporter), KafkaException(org.apache.kafka.common.KafkaException), Properties(java.util.Properties), ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest), Test(org.junit.jupiter.api.Test)
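
For context, a minimal application-side sketch of the same failure mode (the class name and error handling are illustrative assumptions, not part of the test): a KafkaProducer whose configuration cannot be initialized, for example because the bootstrap address cannot be resolved, throws KafkaException from its constructor, and the test above verifies that already-initialized resources such as metrics reporters are closed before the exception propagates.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class ProducerConstructionExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Unresolvable address, so construction fails the same way as in the test.
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "some.invalid.hostname.foo.bar.local:9999");
        try (Producer<byte[], byte[]> producer =
                new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) {
            // Not reached: the constructor throws before the producer becomes usable.
        } catch (KafkaException e) {
            // "Failed to construct kafka producer"; internally created resources
            // (e.g. metrics reporters) have already been closed, as asserted above.
            System.err.println("Could not create producer: " + e.getMessage());
        }
    }
}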

Example 62 with KafkaException

Use of org.apache.kafka.common.KafkaException in the Apache Kafka project.

From the class ProducerBatchTest, the method testBatchCannotAbortTwice:

@Test
public void testBatchCannotAbortTwice() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);
    KafkaException exception = new KafkaException();
    batch.abort(exception);
    assertEquals(1, callback.invocations);
    assertEquals(exception, callback.exception);
    assertNull(callback.metadata);
    try {
        batch.abort(new KafkaException());
        fail("Expected exception from abort");
    } catch (IllegalStateException e) {
        // expected
    }
    assertEquals(1, callback.invocations);
    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have thrown");
    } catch (ExecutionException e) {
        assertEquals(exception, e.getCause());
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition), KafkaException(org.apache.kafka.common.KafkaException), ExecutionException(java.util.concurrent.ExecutionException), Test(org.junit.jupiter.api.Test)
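
The same two expectations can be written more compactly with JUnit 5's Assertions.assertThrows; a sketch that reuses the batch, future, and exception variables from the test above:

// A second abort is rejected outright.
assertThrows(IllegalStateException.class, () -> batch.abort(new KafkaException()));
// The future still completes exceptionally with the original abort cause.
ExecutionException wrapped = assertThrows(ExecutionException.class, future::get);
assertEquals(exception, wrapped.getCause());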

Example 63 with KafkaException

Use of org.apache.kafka.common.KafkaException in the Apache Kafka project.

From the class ProducerBatchTest, the method testBatchAbort:

@Test
public void testBatchAbort() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);
    KafkaException exception = new KafkaException();
    batch.abort(exception);
    assertTrue(future.isDone());
    assertEquals(1, callback.invocations);
    assertEquals(exception, callback.exception);
    assertNull(callback.metadata);
    // subsequent completion should be ignored
    assertFalse(batch.complete(500L, 2342342341L));
    assertFalse(batch.completeExceptionally(new KafkaException(), index -> new KafkaException()));
    assertEquals(1, callback.invocations);
    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have thrown");
    } catch (ExecutionException e) {
        assertEquals(exception, e.getCause());
    }
}
Also used : Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows), Assertions.fail(org.junit.jupiter.api.Assertions.fail), Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull), Arrays(java.util.Arrays),
MAGIC_VALUE_V2(org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V2), MAGIC_VALUE_V1(org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V1), MAGIC_VALUE_V0(org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V0),
Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull), KafkaException(org.apache.kafka.common.KafkaException), HashMap(java.util.HashMap), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), Deque(java.util.Deque),
Function(java.util.function.Function), ByteBuffer(java.nio.ByteBuffer), Record(org.apache.kafka.common.record.Record), ArrayList(java.util.ArrayList), Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse),
RecordBatch(org.apache.kafka.common.record.RecordBatch), Map(java.util.Map), Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals), TimestampType(org.apache.kafka.common.record.TimestampType),
TopicPartition(org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder), CompressionType(org.apache.kafka.common.record.CompressionType), TestUtils(org.apache.kafka.test.TestUtils),
RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata), Test(org.junit.jupiter.api.Test), ExecutionException(java.util.concurrent.ExecutionException), MemoryRecords(org.apache.kafka.common.record.MemoryRecords),
List(java.util.List), Header(org.apache.kafka.common.header.Header), Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue), Callback(org.apache.kafka.clients.producer.Callback)
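
From the application side, a batch aborted with a KafkaException surfaces through the Future returned by Producer.send(), as the ExecutionException assertions above show. A minimal sketch of a hypothetical helper (the method name is an assumption for illustration; the types are the standard ones from org.apache.kafka.clients.producer) that unwraps the cause:

// Synchronously send one record and rethrow the underlying Kafka failure, if any.
static RecordMetadata sendOrThrow(Producer<byte[], byte[]> producer,
                                  ProducerRecord<byte[], byte[]> record) throws InterruptedException {
    try {
        return producer.send(record).get();
    } catch (ExecutionException e) {
        // For an aborted or failed batch, the cause is the exception the batch was completed with.
        if (e.getCause() instanceof KafkaException) {
            throw (KafkaException) e.getCause();
        }
        throw new KafkaException("Unexpected send failure", e.getCause());
    }
}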

Example 64 with KafkaException

Use of org.apache.kafka.common.KafkaException in the Apache Kafka project.

From the class ProducerInterceptorsTest, the method testOnAcknowledgementWithErrorChain:

@Test
public void testOnAcknowledgementWithErrorChain() {
    List<ProducerInterceptor<Integer, String>> interceptorList = new ArrayList<>();
    AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One");
    interceptorList.add(interceptor1);
    ProducerInterceptors<Integer, String> interceptors = new ProducerInterceptors<>(interceptorList);
    // verify that metadata contains both topic and partition
    interceptors.onSendError(producerRecord, new TopicPartition(producerRecord.topic(), producerRecord.partition()), new KafkaException("Test"));
    assertEquals(1, onErrorAckCount);
    assertEquals(1, onErrorAckWithTopicPartitionSetCount);
    // verify that metadata contains both topic and partition (because record already contains partition)
    interceptors.onSendError(producerRecord, null, new KafkaException("Test"));
    assertEquals(2, onErrorAckCount);
    assertEquals(2, onErrorAckWithTopicPartitionSetCount);
    // if producer record does not contain partition, interceptor should get partition == -1
    ProducerRecord<Integer, String> record2 = new ProducerRecord<>("test2", null, 1, "value");
    interceptors.onSendError(record2, null, new KafkaException("Test"));
    assertEquals(3, onErrorAckCount);
    assertEquals(3, onErrorAckWithTopicSetCount);
    assertEquals(2, onErrorAckWithTopicPartitionSetCount);
    // if producer record does not contain partition, but topic/partition is passed to
    // onSendError, then interceptor should get valid partition
    int reassignedPartition = producerRecord.partition() + 1;
    interceptors.onSendError(record2, new TopicPartition(record2.topic(), reassignedPartition), new KafkaException("Test"));
    assertEquals(4, onErrorAckCount);
    assertEquals(4, onErrorAckWithTopicSetCount);
    assertEquals(3, onErrorAckWithTopicPartitionSetCount);
    // if both record and topic/partition are null, interceptor should not receive metadata
    interceptors.onSendError(null, null, new KafkaException("Test"));
    assertEquals(5, onErrorAckCount);
    assertEquals(4, onErrorAckWithTopicSetCount);
    assertEquals(3, onErrorAckWithTopicPartitionSetCount);
    interceptors.close();
}
Also used : ArrayList(java.util.ArrayList), TopicPartition(org.apache.kafka.common.TopicPartition), ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord), ProducerInterceptor(org.apache.kafka.clients.producer.ProducerInterceptor), KafkaException(org.apache.kafka.common.KafkaException), Test(org.junit.jupiter.api.Test)
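
As a usage sketch, an interceptor that only reacts to failed acknowledgements might look like the following (the class name and the logging are illustrative assumptions; only the ProducerInterceptor contract itself comes from Kafka):

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class ErrorLoggingInterceptor<K, V> implements ProducerInterceptor<K, V> {
    @Override
    public ProducerRecord<K, V> onSend(ProducerRecord<K, V> record) {
        return record; // pass the record through unchanged
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            // On a send error, metadata may be null or carry only the topic and a
            // partition of -1, as the test above verifies.
            System.err.println("Send failed for " + metadata + ": " + exception);
        }
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}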

Example 65 with KafkaException

Use of org.apache.kafka.common.KafkaException in the Apache Kafka project.

From the class ConsumerCoordinator, the method onLeavePrepare:

@Override
public void onLeavePrepare() {
    // Save the current Generation, as the hb thread can change it at any time
    final Generation currentGeneration = generation();
    log.debug("Executing onLeavePrepare with generation {}", currentGeneration);
    // we should reset assignment and trigger the callback before leaving group
    SortedSet<TopicPartition> droppedPartitions = new TreeSet<>(COMPARATOR);
    droppedPartitions.addAll(subscriptions.assignedPartitions());
    if (subscriptions.hasAutoAssignedPartitions() && !droppedPartitions.isEmpty()) {
        final Exception e;
        if ((currentGeneration.generationId == Generation.NO_GENERATION.generationId || currentGeneration.memberId.equals(Generation.NO_GENERATION.memberId)) || rebalanceInProgress()) {
            e = invokePartitionsLost(droppedPartitions);
        } else {
            e = invokePartitionsRevoked(droppedPartitions);
        }
        subscriptions.assignFromSubscribed(Collections.emptySet());
        if (e != null) {
            throw new KafkaException("User rebalance callback throws an error", e);
        }
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition), TreeSet(java.util.TreeSet), KafkaException(org.apache.kafka.common.KafkaException), GroupAuthorizationException(org.apache.kafka.common.errors.GroupAuthorizationException), UnstableOffsetCommitException(org.apache.kafka.common.errors.UnstableOffsetCommitException), WakeupException(org.apache.kafka.common.errors.WakeupException), RebalanceInProgressException(org.apache.kafka.common.errors.RebalanceInProgressException), RetriableCommitFailedException(org.apache.kafka.clients.consumer.RetriableCommitFailedException), RetriableException(org.apache.kafka.common.errors.RetriableException), InterruptException(org.apache.kafka.common.errors.InterruptException), TimeoutException(org.apache.kafka.common.errors.TimeoutException), FencedInstanceIdException(org.apache.kafka.common.errors.FencedInstanceIdException), TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException), CommitFailedException(org.apache.kafka.clients.consumer.CommitFailedException)
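
The wrapped exception originates in a user-supplied ConsumerRebalanceListener. A minimal sketch (the consumer variable and the callback body are illustrative assumptions) of a callback whose failure would surface as the KafkaException above:

consumer.subscribe(Collections.singletonList("topic"), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Anything thrown here is caught by ConsumerCoordinator and rethrown as
        // KafkaException("User rebalance callback throws an error", e).
        flushOffsetsFor(partitions); // hypothetical application method
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // no-op for this sketch
    }
});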

Aggregations

KafkaException (org.apache.kafka.common.KafkaException): 262
Test (org.junit.Test): 69
TopicPartition (org.apache.kafka.common.TopicPartition): 56
Test (org.junit.jupiter.api.Test): 47
HashMap (java.util.HashMap): 40
IOException (java.io.IOException): 39
StreamsException (org.apache.kafka.streams.errors.StreamsException): 34
Map (java.util.Map): 32
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 28
ArrayList (java.util.ArrayList): 27
List (java.util.List): 21
ByteBuffer (java.nio.ByteBuffer): 19
ExecutionException (java.util.concurrent.ExecutionException): 19
ConfigException (org.apache.kafka.common.config.ConfigException): 16
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 14
HashSet (java.util.HashSet): 13
Properties (java.util.Properties): 13
Set (java.util.Set): 11
Collectors (java.util.stream.Collectors): 11
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 11